Example #1
    def handle(self, *args: Any, **options: str) -> None:
        queue_name = options['queue_name']
        file_name = options['file_name']

        if file_name == '-':
            f = sys.stdin  # type: IO[str]
        else:
            f = open(file_name)

        while True:
            line = f.readline()
            if not line:
                break

            line = line.strip()
            try:
                payload = line.split('\t')[1]
            except IndexError:
                payload = line

            print('Queueing to queue %s: %s' % (queue_name, payload))

            # Verify that payload is valid json.
            data = ujson.loads(payload)

            # This is designed to use the `error` method rather than
            # the call_consume_in_tests flow.
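            # (`error` is a failure callback defined elsewhere in this
            # module; its definition is not shown in this excerpt.)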
            queue_json_publish(queue_name, data, error)
Example #2
def missedmessage_hook(user_profile_id, queue, last_for_client):
    # Only process missedmessage hook when the last queue for a
    # client has been garbage collected
    if not last_for_client:
        return

    message_ids_to_notify = []
    for event in queue.event_queue.contents():
        if not event["type"] == "message" or not event["flags"]:
            continue

        if "mentioned" in event["flags"] and "read" not in event["flags"]:
            notify_info = dict(message_id=event["message"]["id"])

            if not event.get("push_notified", False):
                notify_info["send_push"] = True
            if not event.get("email_notified", False):
                notify_info["send_email"] = True
            message_ids_to_notify.append(notify_info)

    for notify_info in message_ids_to_notify:
        msg_id = notify_info["message_id"]
        notice = build_offline_notification(user_profile_id, msg_id)
        if notify_info.get("send_push", False):
            queue_json_publish("missedmessage_mobile_notifications", notice, lambda notice: None)
        if notify_info.get("send_email", False):
            queue_json_publish("missedmessage_emails", notice, lambda notice: None)
Example #3
def missedmessage_hook(user_profile_id, queue, last_for_client):
    # Only process missedmessage hook when the last queue for a
    # client has been garbage collected
    if not last_for_client:
        return

    message_ids_to_notify = []
    for event in queue.event_queue.contents():
        if event['type'] != 'message' or not event['flags']:
            continue

        if 'mentioned' in event['flags'] and 'read' not in event['flags']:
            notify_info = dict(message_id=event['message']['id'])

            if not event.get('push_notified', False):
                notify_info['send_push'] = True
            if not event.get('email_notified', False):
                notify_info['send_email'] = True
            message_ids_to_notify.append(notify_info)

    for notify_info in message_ids_to_notify:
        msg_id = notify_info['message_id']
        notice = build_offline_notification(user_profile_id, msg_id)
        if notify_info.get('send_push', False):
            queue_json_publish("missedmessage_mobile_notifications", notice, lambda notice: None)
        if notify_info.get('send_email', False):
            queue_json_publish("missedmessage_emails", notice, lambda notice: None)
Example #4
def mirror_email_message(data: Dict[str, str]) -> Dict[str, str]:
    rcpt_to = data['recipient']
    if is_missed_message_address(rcpt_to):
        try:
            mark_missed_message_address_as_used(rcpt_to)
        except ZulipEmailForwardError:
            return {
                "status": "error",
                "msg": "5.1.1 Bad destination mailbox address: "
                       "Bad or expired missed message address."
            }
    else:
        try:
            extract_and_validate(rcpt_to)
        except ZulipEmailForwardError:
            return {
                "status": "error",
                "msg": "5.1.1 Bad destination mailbox address: "
                       "Please use the address specified in your Streams page."
            }
    queue_json_publish(
        "email_mirror",
        {
            "message": data['msg_text'],
            "rcpt_to": rcpt_to
        }
    )
    return {"status": "success"}
Example #5
    def test_register_consumer_nack(self) -> None:
        output = []
        count = 0

        queue_client = get_queue_client()

        def collect(event: Dict[str, Any]) -> None:
            queue_client.stop_consuming()
            nonlocal count
            count += 1
            if count == 1:
                raise Exception("Make me nack!")
            output.append(event)

        queue_client.register_json_consumer("test_suite", collect)
        queue_json_publish("test_suite", {"event": "my_event"})

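        # The first delivery raises inside `collect`, so the client nacks
        # and requeues the message; re-registering the consumer and
        # consuming again then processes it successfully.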
        try:
            queue_client.start_consuming()
        except Exception:
            queue_client.register_json_consumer("test_suite", collect)
            queue_client.start_consuming()

        # Confirm that the consumer ran twice (the nacked delivery plus
        # the retry), but the event was fully processed only once.
        self.assertEqual(count, 2)
        self.assertEqual(len(output), 1)
        self.assertEqual(output[0]['event'], 'my_event')
Example #6
def report_error(request: HttpRequest, user_profile: UserProfile, message: str=REQ(),
                 stacktrace: str=REQ(), ui_message: bool=REQ(validator=check_bool),
                 user_agent: str=REQ(), href: str=REQ(), log: str=REQ(),
                 more_info: Optional[Dict[str, Any]]=REQ(validator=check_dict([]), default=None)
                 ) -> HttpResponse:
    """Accepts an error report and stores in a queue for processing.  The
    actual error reports are later handled by do_report_error (below)"""
    if not settings.BROWSER_ERROR_REPORTING:
        return json_success()
    if more_info is None:
        more_info = {}

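    # If a JS source map is available, annotate the minified stack
    # trace with positions in the original source files.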
    js_source_map = get_js_source_map()
    if js_source_map:
        stacktrace = js_source_map.annotate_stacktrace(stacktrace)

    try:
        version = subprocess.check_output(["git", "log", "HEAD^..HEAD", "--oneline"],
                                          universal_newlines=True)  # type: Optional[str]
    except Exception:
        version = None

    # Get the IP address of the request
    remote_ip = request.META.get('HTTP_X_REAL_IP')
    if remote_ip is None:
        remote_ip = request.META['REMOTE_ADDR']

    # For the privacy of our users, we remove any actual text content
    # in draft_content (from drafts rendering exceptions).  See the
    # comment on privacy_clean_markdown for more details.
    if more_info.get('draft_content'):
        more_info['draft_content'] = privacy_clean_markdown(more_info['draft_content'])

    if user_profile.is_authenticated:
        email = user_profile.delivery_email
        full_name = user_profile.full_name
    else:
        email = "*****@*****.**"
        full_name = "Anonymous User"

    queue_json_publish('error_reports', dict(
        type = "browser",
        report = dict(
            host = request.get_host().split(":")[0],
            ip_address = remote_ip,
            user_email = email,
            user_full_name = full_name,
            user_visible = ui_message,
            server_path = settings.DEPLOY_ROOT,
            version = version,
            user_agent = user_agent,
            href = href,
            message = message,
            stacktrace = stacktrace,
            log = log,
            more_info = more_info,
        )
    ))

    return json_success()
Example #7
    def test_queue_basics_json(self) -> None:
        queue_json_publish("test_suite", {"event": "my_event"})

        queue_client = get_queue_client()
        result = queue_client.drain_queue("test_suite", json=True)
        self.assertEqual(len(result), 1)
        self.assertEqual(result[0]['event'], 'my_event')
Example #8
def json_report_error(request, user_profile, message=REQ(), stacktrace=REQ(),
                      ui_message=REQ(validator=check_bool), user_agent=REQ(),
                      href=REQ(), log=REQ(),
                      more_info=REQ(validator=check_dict([]), default=None)):
    # type: (HttpRequest, UserProfile, text_type, text_type, bool, text_type, text_type, text_type, Dict[str, Any]) -> HttpResponse
    if not settings.ERROR_REPORTING:
        return json_success()

    if js_source_map:
        stacktrace = js_source_map.annotate_stacktrace(stacktrace)

    try:
        version = subprocess.check_output(["git", "log", "HEAD^..HEAD", "--oneline"], universal_newlines=True)
    except Exception:
        version = None

    queue_json_publish('error_reports', dict(
        type = "browser",
        report = dict(
            user_email = user_profile.email,
            user_full_name = user_profile.full_name,
            user_visible = ui_message,
            server_path = settings.DEPLOY_ROOT,
            version = version,
            user_agent = user_agent,
            href = href,
            message = message,
            stacktrace = stacktrace,
            log = log,
            more_info = more_info,
        )
    ), lambda x: None)

    return json_success()
Example #9
def email_on_new_login(sender: Any, user: UserProfile, request: Any, **kwargs: Any) -> None:
    # We import here to minimize the dependencies of this module,
    # since it runs as part of `manage.py` initialization
    from zerver.context_processors import common_context

    if not settings.SEND_LOGIN_EMAILS:
        return

    if request:
        # If the user's account was just created, avoid sending an email.
        if getattr(user, "just_registered", False):
            return

        user_agent = request.META.get('HTTP_USER_AGENT', "").lower()

        context = common_context(user)
        context['user_email'] = user.email
        user_tz = user.timezone
        if user_tz == '':
            user_tz = timezone_get_current_timezone_name()
        local_time = timezone_now().astimezone(get_timezone(user_tz))
        context['login_time'] = local_time.strftime('%A, %B %d, %Y at %I:%M%p ') + user_tz
        context['device_ip'] = request.META.get('REMOTE_ADDR') or _("Unknown IP address")
        context['device_os'] = get_device_os(user_agent)
        context['device_browser'] = get_device_browser(user_agent)

        email_dict = {
            'template_prefix': 'zerver/emails/notify_new_login',
            'to_user_id': user.id,
            'from_name': 'Zulip Account Security',
            'from_address': FromAddress.NOREPLY,
            'context': context}
        queue_json_publish("email_senders", email_dict)
Example #10
    def emit(self, record):
        # type: (logging.LogRecord) -> None
        try:
            request = record.request  # type: HttpRequest

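            # Django's exception reporter filter scrubs sensitive POST
            # parameters (e.g. passwords) out of the report data below.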
            exception_filter = get_exception_reporter_filter(request)

            if record.exc_info:
                stack_trace = ''.join(traceback.format_exception(*record.exc_info))
            else:
                stack_trace = None

            try:
                user_profile = request.user
                user_full_name = user_profile.full_name
                user_email = user_profile.email
            except Exception:
                traceback.print_exc()
                # Error was triggered by an anonymous user.
                user_full_name = None
                user_email = None

            data = request.GET if request.method == 'GET' else \
                exception_filter.get_post_parameters(request)

            report = dict(
                node = platform.node(),
                method = request.method,
                path = request.path,
                data = data,
                remote_addr = request.META.get('REMOTE_ADDR', None),
                query_string = request.META.get('QUERY_STRING', None),
                server_name = request.META.get('SERVER_NAME', None),
                message = record.getMessage(),
                stack_trace = stack_trace,
                user_full_name = user_full_name,
                user_email = user_email,
            )
        except Exception:
            traceback.print_exc()
            report = dict(
                node = platform.node(),
                message = record.getMessage(),
            )

        try:
            if settings.STAGING_ERROR_NOTIFICATIONS:
                # On staging, process the report directly so it can happen inside this
                # try/except to prevent looping
                from zilencer.error_notify import notify_server_error
                notify_server_error(report)
            else:
                queue_json_publish('error_reports', dict(
                    type = "server",
                    report = report,
                ), lambda x: None)
        except Exception:
            # If this breaks, complain loudly but don't pass the traceback up the stream
            # However, we *don't* want to use logging.exception since that could trigger a loop.
            logging.warning("Reporting an exception triggered an exception!", exc_info=True)
Example #11
    def handle(self, *args, **options):
        # type: (*Any, **str) -> None
        queue_name = options['queue_name']
        file_name = options['file_name']

        if file_name == '-':
            f = sys.stdin  # type: IO[str]
        else:
            f = open(file_name)

        while True:
            line = f.readline()
            if not line:
                break

            line = line.strip()
            try:
                payload = line.split('\t')[1]
            except IndexError:
                payload = line

            print('Queueing to queue %s: %s' % (queue_name, payload))

            # Verify that payload is valid json.
            data = ujson.loads(payload)

            queue_json_publish(queue_name, data, error)
Example #12
    def on_message(self, msg_raw):
        # type: (str) -> None
        log_data = dict(extra='[transport=%s' % (self.session.transport_name,))
        record_request_start_data(log_data)
        msg = ujson.loads(msg_raw)

        if self.did_close:
            logger.info("Received message on already closed socket! transport=%s user=%s client_id=%s"
                        % (self.session.transport_name,
                           self.session.user_profile.email if self.session.user_profile is not None else 'unknown',
                           self.client_id))

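        # Acknowledge receipt immediately; the real response arrives
        # asynchronously after the message_sender worker publishes its
        # result to the return queue.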
        self.session.send_message({'req_id': msg['req_id'], 'type': 'ack'})

        if msg['type'] == 'auth':
            log_data['extra'] += ']'
            try:
                self.authenticate_client(msg)
                # TODO: Fill in the correct client
                write_log_line(log_data, path='/socket/auth', method='SOCKET',
                               remote_ip=self.session.conn_info.ip,
                               email=self.session.user_profile.email,
                               client_name='?')
            except SocketAuthError as e:
                response = {'result': 'error', 'msg': e.msg}
                self.session.send_message({'req_id': msg['req_id'], 'type': 'response',
                                           'response': response})
                write_log_line(log_data, path='/socket/auth', method='SOCKET',
                               remote_ip=self.session.conn_info.ip,
                               email='unknown', client_name='?',
                               status_code=403, error_content=ujson.dumps(response))
            return
        else:
            if not self.authenticated:
                response = {'result': 'error', 'msg': "Not yet authenticated"}
                self.session.send_message({'req_id': msg['req_id'], 'type': 'response',
                                           'response': response})
                write_log_line(log_data, path='/socket/service_request', method='SOCKET',
                               remote_ip=self.session.conn_info.ip,
                               email='unknown', client_name='?',
                               status_code=403, error_content=ujson.dumps(response))
                return

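        # Record in redis that the request was received, expiring after
        # 24 hours; the message_sender worker later marks it complete.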
        redis_key = req_redis_key(msg['req_id'])
        with redis_client.pipeline() as pipeline:
            pipeline.hmset(redis_key, {'status': 'received'})
            pipeline.expire(redis_key, 60 * 60 * 24)
            pipeline.execute()

        record_request_stop_data(log_data)
        queue_json_publish("message_sender",
                           dict(request=msg['request'],
                                req_id=msg['req_id'],
                                server_meta=dict(user_id=self.session.user_profile.id,
                                                 client_id=self.client_id,
                                                 return_queue="tornado_return",
                                                 log_data=log_data,
                                                 request_environ=dict(REMOTE_ADDR=self.session.conn_info.ip))),
                           fake_message_sender)
Example #13
def send_event(event, users):
    # type: (Mapping[str, Any], Union[Iterable[int], Iterable[Mapping[str, Any]]]) -> None
    """`users` is a list of user IDs, or in the case of `message` type
    events, a list of dicts describing the users and metadata about
    the user/message pair."""
    queue_json_publish("notify_tornado",
                       dict(event=event, users=users),
                       send_notification_http)
Example #14
def send_event(realm: Realm, event: Mapping[str, Any],
               users: Union[Iterable[int], Iterable[Mapping[str, Any]]]) -> None:
    """`users` is a list of user IDs, or in the case of `message` type
    events, a list of dicts describing the users and metadata about
    the user/message pair."""
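    # With sharded Tornado processes, each realm maps to a port, and
    # events are published to that port's own notify queue.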
    port = get_tornado_port(realm)
    queue_json_publish(notify_tornado_queue_name(port),
                       dict(event=event, users=users),
                       lambda *args, **kwargs: send_notification_http(realm, *args, **kwargs))
Example #15
def request_retry(event, failure_message):
    # type: (Dict[str, Any], Text) -> None
    event['failed_tries'] += 1
    if event['failed_tries'] > MAX_REQUEST_RETRIES:
        bot_user = get_user_profile_by_id(event['user_profile_id'])
        failure_message = "Maximum retries exceeded! " + failure_message
        fail_with_message(event, failure_message)
        logging.warning("Maximum retries exceeded for trigger:%s event:%s" % (bot_user.email, event['command']))
    else:
        queue_json_publish("outgoing_webhooks", event, lambda x: None)
Example #16
    def consume(self, event):
        # type: (Mapping[str, Any]) -> None
        server_meta = event['server_meta']

        environ = {
            'REQUEST_METHOD': 'SOCKET',
            'SCRIPT_NAME': '',
            'PATH_INFO': '/json/messages',
            'SERVER_NAME': '127.0.0.1',
            'SERVER_PORT': 9993,
            'SERVER_PROTOCOL': 'ZULIP_SOCKET/1.0',
            'wsgi.version': (1, 0),
            'wsgi.input': StringIO(),
            'wsgi.errors': sys.stderr,
            'wsgi.multithread': False,
            'wsgi.multiprocess': True,
            'wsgi.run_once': False,
            'zulip.emulated_method': 'POST'
        }

        if 'socket_user_agent' in event['request']:
            environ['HTTP_USER_AGENT'] = event['request']['socket_user_agent']
            del event['request']['socket_user_agent']

        # We're mostly using a WSGIRequest for convenience
        environ.update(server_meta['request_environ'])
        request = WSGIRequest(environ)
        # Note: If we ever support non-POST methods, we'll need to change this.
        request._post = event['request']
        request.csrf_processing_done = True

        user_profile = get_user_profile_by_id(server_meta['user_id'])
        request._cached_user = user_profile

        resp = self.handler.get_response(request)
        server_meta['time_request_finished'] = time.time()
        server_meta['worker_log_data'] = request._log_data

        resp_content = resp.content.decode('utf-8')
        response_data = ujson.loads(resp_content)
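        # An error response may mean this worker is unhealthy;
        # check_and_send_restart_signal can request a restart if so.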
        if response_data['result'] == 'error':
            check_and_send_restart_signal()

        result = {'response': response_data, 'req_id': event['req_id'],
                  'server_meta': server_meta}

        redis_key = req_redis_key(event['req_id'])
        self.redis_client.hmset(redis_key, {'status': 'complete',
                                            'response': resp_content})

        # Since this sends back to Tornado, we can't use
        # call_consume_in_tests here.
        queue_json_publish(server_meta['return_queue'], result, lambda e: None)
Example #17
    def emit(self, record: logging.LogRecord) -> None:
        report = {}  # type: Dict[str, Any]

        try:
            report['node'] = platform.node()
            report['host'] = platform.node()

            add_deployment_metadata(report)

            if record.exc_info:
                stack_trace = ''.join(traceback.format_exception(*record.exc_info))
                message = str(record.exc_info[1])
            else:
                stack_trace = 'No stack trace available'
                message = record.getMessage()
                if '\n' in message:
                    # Some exception code paths in queue processors
                    # seem to result in super-long messages
                    stack_trace = message
                    message = message.split('\n')[0]
            report['stack_trace'] = stack_trace
            report['message'] = message

            report['logger_name'] = record.name
            report['log_module'] = find_log_caller_module(record)
            report['log_lineno'] = record.lineno

            if hasattr(record, "request"):
                add_request_metadata(report, record.request)  # type: ignore  # record.request is added dynamically

        except Exception:
            report['message'] = "Exception in preparing exception report!"
            logging.warning(report['message'], exc_info=True)
            report['stack_trace'] = "See /var/log/zulip/errors.log"

        try:
            if settings.STAGING_ERROR_NOTIFICATIONS:
                # On staging, process the report directly so it can happen inside this
                # try/except to prevent looping
                from zerver.lib.error_notify import notify_server_error
                notify_server_error(report)
            else:
                queue_json_publish('error_reports', dict(
                    type = "server",
                    report = report,
                ))
        except Exception:
            # If this breaks, complain loudly but don't pass the traceback up the stream
            # However, we *don't* want to use logging.exception since that could trigger a loop.
            logging.warning("Reporting an exception triggered an exception!", exc_info=True)
Example #18
    def handle(self, *args, **options):
        # type: (*Any, **str) -> None
        if six.PY3:
            print(py3_warning)
            return
        rcpt_to = os.environ.get("ORIGINAL_RECIPIENT", options['recipient'])
        if rcpt_to is not None:
            if is_missed_message_address(rcpt_to):
                try:
                    mark_missed_message_address_as_used(rcpt_to)
                except ZulipEmailForwardError:
                    print("5.1.1 Bad destination mailbox address: Bad or expired missed message address.")
                    exit(posix.EX_NOUSER) # type: ignore # There are no stubs for posix in python 3
            else:
                try:
                    extract_and_validate(rcpt_to)
                except ZulipEmailForwardError:
                    print("5.1.1 Bad destination mailbox address: Please use the address specified "
                          "in your Streams page.")
                    exit(posix.EX_NOUSER) # type: ignore # There are no stubs for posix in python 3

            # Read in the message, at most 25MiB. This is the limit enforced by
            # Gmail, which we use here as a decent metric.
            message = sys.stdin.read(25*1024*1024)

            if len(sys.stdin.read(1)) != 0:
                # We're not at EOF, reject large mail.
                print("5.3.4 Message too big for system: Max size is 25MiB")
                exit(posix.EX_DATAERR) # type: ignore # There are no stubs for posix in python 3

            queue_json_publish(
                    "email_mirror",
                    {
                        "message": message,
                        "rcpt_to": rcpt_to
                    },
                    lambda x: None
            )
        else:
            # We're probably running from cron, try to batch-process mail
            if (not settings.EMAIL_GATEWAY_BOT or not settings.EMAIL_GATEWAY_LOGIN or
                not settings.EMAIL_GATEWAY_PASSWORD or not settings.EMAIL_GATEWAY_IMAP_SERVER or
                not settings.EMAIL_GATEWAY_IMAP_PORT or not settings.EMAIL_GATEWAY_IMAP_FOLDER):
                print("Please configure the Email Mirror Gateway in /etc/zulip/, "
                      "or specify $ORIGINAL_RECIPIENT if piping a single mail.")
                exit(1)
            reactor.callLater(0, main)
            reactor.run()
Example #19
    def test_register_consumer(self) -> None:
        output = []

        queue_client = get_queue_client()

        def collect(event: Dict[str, Any]) -> None:
            output.append(event)
            queue_client.stop_consuming()

        queue_client.register_json_consumer("test_suite", collect)
        queue_json_publish("test_suite", {"event": "my_event"})

        queue_client.start_consuming()

        self.assertEqual(len(output), 1)
        self.assertEqual(output[0]['event'], 'my_event')
Example #20
def update_user_activity(request, user_profile):
    # update_active_status also pushes to rabbitmq, and it seems
    # redundant to log that here as well.
    if request.META["PATH_INFO"] == '/json/users/me/presence':
        return

    if hasattr(request, '_query'):
        query = request._query
    else:
        query = request.META['PATH_INFO']

    event = {'query': query,
             'user_profile_id': user_profile.id,
             'time': datetime_to_timestamp(now()),
             'client': request.client.name}
    queue_json_publish("user_activity", event, lambda event: None)
Example #21
    def emit(self, record):
        # type: (logging.LogRecord) -> None
        try:
            if record.exc_info:
                stack_trace = ''.join(traceback.format_exception(*record.exc_info))  # type: Optional[str]
                message = str(record.exc_info[1])
            else:
                stack_trace = None
                message = record.getMessage()
                if '\n' in message:
                    # Some exception code paths in queue processors
                    # seem to result in super-long messages
                    stack_trace = message
                    message = message.split('\n')[0]

            report = dict(
                node = platform.node(),
                host = platform.node(),
                message = message,
                stack_trace = stack_trace,
            )
            if hasattr(record, "request"):
                add_request_metadata(report, record.request)  # type: ignore  # record.request is added dynamically
        except Exception:
            traceback.print_exc()
            report = dict(
                node = platform.node(),
                host = platform.node(),
                message = record.getMessage(),
                stack_trace = "See /var/log/zulip/errors.log",
            )

        try:
            if settings.STAGING_ERROR_NOTIFICATIONS:
                # On staging, process the report directly so it can happen inside this
                # try/except to prevent looping
                from zerver.lib.error_notify import notify_server_error
                notify_server_error(report)
            else:
                queue_json_publish('error_reports', dict(
                    type = "server",
                    report = report,
                ), lambda x: None)
        except Exception:
            # If this breaks, complain loudly but don't pass the traceback up the stream
            # However, we *don't* want to use logging.exception since that could trigger a loop.
            logging.warning("Reporting an exception triggered an exception!", exc_info=True)
Example #22
    def handle(self, *args: Any, **options: Any) -> None:
        print("Purging queue...")
        queue = SimpleQueueClient()
        queue_name = "noop_batch" if options["batch"] else "noop"
        queue.ensure_queue(queue_name,
                           lambda channel: channel.queue_purge("noop"))
        count = options["count"]
        reps = options["reps"]

        with open(options["csv"], "w", newline="") as csvfile:
            writer = csv.DictWriter(
                csvfile,
                fieldnames=["Queue size", "Queue type", "Prefetch", "Rate"])
            writer.writeheader()

            for prefetch in options["prefetches"]:
                print(f"Queue size {count}, prefetch {prefetch}...")
                worker: Union[NoopWorker, BatchNoopWorker] = NoopWorker(
                    count, options["slow"])
                if options["batch"]:
                    worker = BatchNoopWorker(count, options["slow"])
                    if prefetch > 0 and prefetch < worker.batch_size:
                        print(
                            f"    Skipping, as prefetch {prefetch} is less than batch size {worker.batch_size}"
                        )
                        continue
                worker.ENABLE_TIMEOUTS = True
                worker.setup()

                assert worker.q is not None
                assert worker.q.channel is not None
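                # basic_qos caps how many unacknowledged messages the
                # broker will deliver to this consumer at a time.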
                worker.q.channel.basic_qos(prefetch_count=prefetch)

                total_time = 0.0
                for i in range(1, reps + 1):
                    worker.consumed = 0
                    timeit(
                        lambda: queue_json_publish(queue_name, {}),
                        number=count,
                    )
                    duration = timeit(
                        lambda: worker.start(),
                        number=1,
                    )
                    print(
                        f"    {i}/{reps}: {count}/{duration}s = {count / duration}/s"
                    )
                    total_time += duration
                    writer.writerow({
                        "Queue size": count,
                        "Queue type": queue_name,
                        "Prefetch": prefetch,
                        "Rate": count / duration,
                    })
                    csvfile.flush()
                print(
                    f"  Overall: {reps * count}/{total_time}s = {(reps * count) / total_time}/s"
                )
Example #23
def maybe_enqueue_notifications(user_profile_id: int, message_id: int, private_message: bool,
                                mentioned: bool, stream_push_notify: bool,
                                stream_email_notify: bool, stream_name: Optional[str],
                                always_push_notify: bool, idle: bool,
                                already_notified: Dict[str, bool]) -> Dict[str, bool]:
    """This function has a complete unit test suite in
    `test_enqueue_notifications` that should be expanded as we add
    more features here."""
    notified = dict()  # type: Dict[str, bool]

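    # Push-notify when the user is idle (or always_push_notify is set)
    # for private messages, mentions, and stream push notifications.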
    if (idle or always_push_notify) and (private_message or mentioned or stream_push_notify):
        notice = build_offline_notification(user_profile_id, message_id)
        if private_message:
            notice['trigger'] = 'private_message'
        elif mentioned:
            notice['trigger'] = 'mentioned'
        elif stream_push_notify:
            notice['trigger'] = 'stream_push_notify'
        else:
            raise AssertionError("Unknown notification trigger!")
        notice['stream_name'] = stream_name
        if not already_notified.get("push_notified"):
            queue_json_publish("missedmessage_mobile_notifications", notice)
            notified['push_notified'] = True

    # Send missed_message emails if a private message or a
    # mention.  Eventually, we'll add settings to allow email
    # notifications to match the model of push notifications
    # above.
    if idle and (private_message or mentioned or stream_email_notify):
        notice = build_offline_notification(user_profile_id, message_id)
        if private_message:
            notice['trigger'] = 'private_message'
        elif mentioned:
            notice['trigger'] = 'mentioned'
        elif stream_email_notify:
            notice['trigger'] = 'stream_email_notify'
        else:
            raise AssertionError("Unknown notification trigger!")
        notice['stream_name'] = stream_name
        if not already_notified.get("email_notified"):
            queue_json_publish("missedmessage_emails", notice, lambda notice: None)
            notified['email_notified'] = True

    return notified
Example #24
def email_on_new_login(sender: Any, user: UserProfile, request: Any,
                       **kwargs: Any) -> None:
    if not user.enable_login_emails:
        return
    # We import here to minimize the dependencies of this module,
    # since it runs as part of `manage.py` initialization
    from zerver.context_processors import common_context

    if not settings.SEND_LOGIN_EMAILS:
        return

    if request:
        # If the user's account was just created, avoid sending an email.
        if (timezone_now() -
                user.date_joined).total_seconds() <= JUST_CREATED_THRESHOLD:
            return

        user_agent = request.META.get('HTTP_USER_AGENT', "").lower()

        context = common_context(user)
        context['user_email'] = user.email
        user_tz = user.timezone
        if user_tz == '':
            user_tz = timezone_get_current_timezone_name()
        local_time = timezone_now().astimezone(get_timezone(user_tz))
        if user.twenty_four_hour_time:
            hhmm_string = local_time.strftime('%H:%M')
        else:
            hhmm_string = local_time.strftime('%I:%M%p')
        context['login_time'] = local_time.strftime(
            '%A, %B %d, %Y at {} %Z'.format(hhmm_string))
        context['device_ip'] = request.META.get('REMOTE_ADDR') or _(
            "Unknown IP address")
        context['device_os'] = get_device_os(user_agent)
        context['device_browser'] = get_device_browser(user_agent)
        context['unsubscribe_link'] = one_click_unsubscribe_link(user, 'login')

        email_dict = {
            'template_prefix': 'zerver/emails/notify_new_login',
            'to_user_id': user.id,
            'from_name': 'Zulip Account Security',
            'from_address': FromAddress.NOREPLY,
            'context': context
        }
        queue_json_publish("email_senders", email_dict)
Example #25
def maybe_enqueue_notifications(user_profile_id: int, message_id: int, private_message: bool,
                                mentioned: bool, stream_push_notify: bool,
                                stream_email_notify: bool, stream_name: Optional[str],
                                always_push_notify: bool, idle: bool,
                                already_notified: Dict[str, bool]) -> Dict[str, bool]:
    """This function has a complete unit test suite in
    `test_enqueue_notifications` that should be expanded as we add
    more features here."""
    notified = dict()  # type: Dict[str, bool]

    if (idle or always_push_notify) and (private_message or mentioned or stream_push_notify):
        notice = build_offline_notification(user_profile_id, message_id)
        if private_message:
            notice['trigger'] = 'private_message'
        elif mentioned:
            notice['trigger'] = 'mentioned'
        elif stream_push_notify:
            notice['trigger'] = 'stream_push_notify'
        else:
            raise AssertionError("Unknown notification trigger!")
        notice['stream_name'] = stream_name
        if not already_notified.get("push_notified"):
            queue_json_publish("missedmessage_mobile_notifications", notice)
            notified['push_notified'] = True

    # Send missed_message emails if a private message or a
    # mention.  Eventually, we'll add settings to allow email
    # notifications to match the model of push notifications
    # above.
    if idle and (private_message or mentioned or stream_email_notify):
        notice = build_offline_notification(user_profile_id, message_id)
        if private_message:
            notice['trigger'] = 'private_message'
        elif mentioned:
            notice['trigger'] = 'mentioned'
        elif stream_email_notify:
            notice['trigger'] = 'stream_email_notify'
        else:
            raise AssertionError("Unknown notification trigger!")
        notice['stream_name'] = stream_name
        if not already_notified.get("email_notified"):
            queue_json_publish("missedmessage_emails", notice, lambda notice: None)
            notified['email_notified'] = True

    return notified
Example #26
    def handle(self, *args, **options):
        # type: (*Any, **str) -> None
        rcpt_to = force_text(os.environ.get("ORIGINAL_RECIPIENT", options['recipient']))
        if rcpt_to is not None:
            if is_missed_message_address(rcpt_to):
                try:
                    mark_missed_message_address_as_used(rcpt_to)
                except ZulipEmailForwardError:
                    print("5.1.1 Bad destination mailbox address: Bad or expired missed message address.")
                    exit(posix.EX_NOUSER) # type: ignore # There are no stubs for posix in python 3
            else:
                try:
                    extract_and_validate(rcpt_to)
                except ZulipEmailForwardError:
                    print("5.1.1 Bad destination mailbox address: Please use the address specified "
                          "in your Streams page.")
                    exit(posix.EX_NOUSER) # type: ignore # There are no stubs for posix in python 3

            # Read in the message, at most 25MiB. This is the limit enforced by
            # Gmail, which we use here as a decent metric.
            msg_text = sys.stdin.read(25*1024*1024)

            if len(sys.stdin.read(1)) != 0:
                # We're not at EOF, reject large mail.
                print("5.3.4 Message too big for system: Max size is 25MiB")
                exit(posix.EX_DATAERR) # type: ignore # There are no stubs for posix in python 3

            queue_json_publish(
                "email_mirror",
                {
                    "message": msg_text,
                    "rcpt_to": rcpt_to
                },
                lambda x: None
            )
        else:
            # We're probably running from cron, try to batch-process mail
            if (not settings.EMAIL_GATEWAY_BOT or not settings.EMAIL_GATEWAY_LOGIN or
                not settings.EMAIL_GATEWAY_PASSWORD or not settings.EMAIL_GATEWAY_IMAP_SERVER or
                    not settings.EMAIL_GATEWAY_IMAP_PORT or not settings.EMAIL_GATEWAY_IMAP_FOLDER):
                print("Please configure the Email Mirror Gateway in /etc/zulip/, "
                      "or specify $ORIGINAL_RECIPIENT if piping a single mail.")
                exit(1)
            for message in get_imap_messages():
                process_message(message)
Example #27
def json_report_error(request,
                      user_profile,
                      message=REQ(),
                      stacktrace=REQ(),
                      ui_message=REQ(validator=check_bool),
                      user_agent=REQ(),
                      href=REQ(),
                      log=REQ(),
                      more_info=REQ(validator=check_dict([]), default=None)):
    # type: (HttpRequest, UserProfile, Text, Text, bool, Text, Text, Text, Dict[str, Any]) -> HttpResponse
    """Accepts an error report and stores in a queue for processing.  The
    actual error reports are later handled by do_report_error (below)"""
    if not settings.BROWSER_ERROR_REPORTING:
        return json_success()

    js_source_map = get_js_source_map()
    if js_source_map:
        stacktrace = js_source_map.annotate_stacktrace(stacktrace)

    try:
        version = subprocess.check_output(
            ["git", "log", "HEAD^..HEAD", "--oneline"],
            universal_newlines=True)  # type: Optional[Text]
    except Exception:
        version = None

    queue_json_publish(
        'error_reports',
        dict(type="browser",
             report=dict(
                 host=request.get_host().split(":")[0],
                 user_email=user_profile.email,
                 user_full_name=user_profile.full_name,
                 user_visible=ui_message,
                 server_path=settings.DEPLOY_ROOT,
                 version=version,
                 user_agent=user_agent,
                 href=href,
                 message=message,
                 stacktrace=stacktrace,
                 log=log,
                 more_info=more_info,
             )), lambda x: None)

    return json_success()
Example #28
def export_realm(request: HttpRequest, user: UserProfile) -> HttpResponse:
    # Currently only supports public-data-only exports.
    event_type = RealmAuditLog.REALM_EXPORTED
    event_time = timezone_now()
    realm = user.realm
    EXPORT_LIMIT = 5
    # Conservative limit on the size of message history in
    # organizations being exported; this exists to protect Zulip
    # against a possible unmonitored accidental DoS caused by trying
    # to export an organization with huge history.
    MAX_MESSAGE_HISTORY = 250000
    MAX_UPLOAD_QUOTA = 10 * 1024 * 1024 * 1024

    # Filter based upon the number of events that have occurred in the delta
    # If we are at the limit, the incoming request is rejected
    event_time_delta = event_time - timedelta(days=7)
    limit_check = RealmAuditLog.objects.filter(
        realm=realm, event_type=event_type, event_time__gte=event_time_delta)
    if len(limit_check) >= EXPORT_LIMIT:
        return json_error(_('Exceeded rate limit.'))

    total_messages = sum(
        realm_count.value for realm_count in RealmCount.objects.filter(
            realm=user.realm, property='messages_sent:client:day'))
    if (total_messages > MAX_MESSAGE_HISTORY or
            user.realm.currently_used_upload_space_bytes() > MAX_UPLOAD_QUOTA):
        return json_error(
            _('Please request a manual export from %s.') %
            (settings.ZULIP_ADMINISTRATOR, ))

    row = RealmAuditLog.objects.create(realm=realm,
                                       event_type=event_type,
                                       event_time=event_time,
                                       acting_user=user)
    # Using the deferred_work queue processor to avoid
    # killing the process after 60s
    event = {
        'type': "realm_export",
        'time': event_time,
        'realm_id': realm.id,
        'user_profile_id': user.id,
        'id': row.id
    }
    queue_json_publish('deferred_work', event)
    return json_success()
Example #29
    def consume(self, event):
        server_meta = event['server_meta']

        environ = {
            'REQUEST_METHOD': 'SOCKET',
            'SCRIPT_NAME': '',
            'PATH_INFO': '/json/messages',
            'SERVER_NAME': 'localhost',
            'SERVER_PORT': 9993,
            'SERVER_PROTOCOL': 'ZULIP_SOCKET/1.0',
            'wsgi.version': (1, 0),
            'wsgi.input': StringIO.StringIO(),
            'wsgi.errors': sys.stderr,
            'wsgi.multithread': False,
            'wsgi.multiprocess': True,
            'wsgi.run_once': False,
            'zulip.emulated_method': 'POST'
        }
        # We're mostly using a WSGIRequest for convenience
        environ.update(server_meta['request_environ'])
        request = WSGIRequest(environ)
        request._request = event['request']
        request.csrf_processing_done = True

        user_profile = get_user_profile_by_id(server_meta['user_id'])
        request._cached_user = user_profile

        resp = self.handler.get_response(request)
        server_meta['time_request_finished'] = time.time()
        server_meta['worker_log_data'] = request._log_data

        resp_content = resp.content
        result = {
            'response': ujson.loads(resp_content),
            'req_id': event['req_id'],
            'server_meta': server_meta
        }

        redis_key = req_redis_key(event['req_id'])
        self.redis_client.hmset(redis_key, {
            'status': 'complete',
            'response': resp_content
        })

        queue_json_publish(server_meta['return_queue'], result, lambda e: None)
Example #30
def update_user_presence(
    user_profile: UserProfile,
    client: Client,
    log_time: datetime.datetime,
    status: int,
    new_user_input: bool,
) -> None:
    event = {
        "user_profile_id": user_profile.id,
        "status": status,
        "time": datetime_to_timestamp(log_time),
        "client": client.name,
    }

    queue_json_publish("user_presence", event)

    if new_user_input:
        update_user_activity_interval(user_profile, log_time)
Example #31
def mirror_email_message(data: Dict[str, str]) -> Dict[str, str]:
    rcpt_to = data['recipient']
    try:
        validate_to_address(rcpt_to)
    except ZulipEmailForwardError as e:
        return {
            "status": "error",
            "msg": "5.1.1 Bad destination mailbox address: {}".format(e)
        }

    queue_json_publish(
        "email_mirror",
        {
            "message": data['msg_text'],
            "rcpt_to": rcpt_to
        }
    )
    return {"status": "success"}
Example #32
def update_user_activity(request, user_profile):
    # update_active_status also pushes to rabbitmq, and it seems
    # redundant to log that here as well.
    if request.META["PATH_INFO"] == '/json/update_active_status':
        return

    if hasattr(request, '_query'):
        query = request._query
    else:
        query = request.META['PATH_INFO']

    event = {
        'query': query,
        'user_profile_id': user_profile.id,
        'time': datetime_to_timestamp(now()),
        'client': request.client.name
    }
    queue_json_publish("user_activity", event, lambda event: None)
Example #33
    def test_queue_basics_json_error(self) -> None:
        queue_json_publish("test_suite", {"event": "my_event"})

        queue_client = get_queue_client()
        raised = False
        try:
            with queue_client.json_drain_queue("test_suite") as result:
                self.assertEqual(len(result), 1)
                self.assertEqual(result[0]['event'], 'my_event')
                raise ValueError()
        except ValueError:
            raised = True
        assert raised

        # Still in the queue to be fetched
        with queue_client.json_drain_queue("test_suite") as result:
            self.assertEqual(len(result), 1)
            self.assertEqual(result[0]['event'], 'my_event')
Example #34
def one_rep() -> None:
    nonlocal total_enqueue_time, total_dequeue_time
    total_enqueue_time += timeit(
        lambda: queue_json_publish(queue_name, {}),
        number=count,
    )
    total_dequeue_time += timeit(
        lambda: worker.start(),
        number=1,
    )
Example #35
def update_user_activity(request, user_profile):
    # type: (HttpRequest, UserProfile) -> None
    # update_active_status also pushes to rabbitmq, and it seems
    # redundant to log that here as well.
    if request.META["PATH_INFO"] == "/json/users/me/presence":
        return

    if hasattr(request, "_query"):
        query = request._query
    else:
        query = request.META["PATH_INFO"]

    event = {
        "query": query,
        "user_profile_id": user_profile.id,
        "time": datetime_to_timestamp(now()),
        "client": request.client.name,
    }
    queue_json_publish("user_activity", event, lambda event: None)
Example #36
def update_user_activity(request: HttpRequest, user_profile: UserProfile,
                         query: Optional[str]) -> None:
    # update_active_status also pushes to rabbitmq, and it seems
    # redundant to log that here as well.
    if request.META["PATH_INFO"] == '/json/users/me/presence':
        return

    if query is not None:
        pass
    elif hasattr(request, '_query'):
        query = request._query
    else:
        query = request.META['PATH_INFO']

    event = {'query': query,
             'user_profile_id': user_profile.id,
             'time': datetime_to_timestamp(timezone_now()),
             'client_id': request.client.id}
    queue_json_publish("user_activity", event, lambda event: None)
Example #37
def do_regenerate_api_key(user_profile: UserProfile,
                          acting_user: UserProfile) -> str:
    old_api_key = user_profile.api_key
    new_api_key = generate_api_key()
    user_profile.api_key = new_api_key
    user_profile.save(update_fields=["api_key"])

    # We need to explicitly delete the old API key from our caches,
    # because the on-save handler for flushing the UserProfile object
    # in zerver/lib/cache.py only has access to the new API key.
    cache_delete(user_profile_by_api_key_cache_key(old_api_key))

    event_time = timezone_now()
    RealmAuditLog.objects.create(
        realm=user_profile.realm,
        acting_user=acting_user,
        modified_user=user_profile,
        event_type=RealmAuditLog.USER_API_KEY_CHANGED,
        event_time=event_time,
    )

    if user_profile.is_bot:
        send_event(
            user_profile.realm,
            dict(
                type="realm_bot",
                op="update",
                bot=dict(
                    user_id=user_profile.id,
                    api_key=new_api_key,
                ),
            ),
            bot_owner_user_ids(user_profile),
        )

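    # Mobile clients were authenticating with the old API key, so queue
    # a deferred job to clear their registered push device tokens.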
    event = {
        "type": "clear_push_device_tokens",
        "user_profile_id": user_profile.id
    }
    queue_json_publish("deferred_work", event)

    return new_api_key
Example #38
    def test_queue_error_json(self) -> None:
        queue_client = get_queue_client()
        actual_publish = queue_client.publish

        self.counter = 0

        def throw_connection_error_once(self_obj: Any, *args: Any,
                                        **kwargs: Any) -> None:
            self.counter += 1
            if self.counter <= 1:
                raise AMQPConnectionError("test")
            actual_publish(*args, **kwargs)

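        # queue_json_publish retries the publish after a connection
        # error, so the event still reaches the queue.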
        with mock.patch("zerver.lib.queue.SimpleQueueClient.publish",
                        throw_connection_error_once):
            queue_json_publish("test_suite", {"event": "my_event"})

        result = queue_client.drain_queue("test_suite", json=True)
        self.assertEqual(len(result), 1)
        self.assertEqual(result[0]['event'], 'my_event')
Example #39
    def test_queue_error_json(self) -> None:
        queue_client = get_queue_client()
        actual_publish = queue_client.publish

        self.counter = 0

        def throw_connection_error_once(self_obj: Any, *args: Any,
                                        **kwargs: Any) -> None:
            self.counter += 1
            if self.counter <= 1:
                raise AMQPConnectionError("test")
            actual_publish(*args, **kwargs)

        with mock.patch("zerver.lib.queue.SimpleQueueClient.publish",
                        throw_connection_error_once):
            queue_json_publish("test_suite", {"event": "my_event"})

        result = queue_client.drain_queue("test_suite", json=True)
        self.assertEqual(len(result), 1)
        self.assertEqual(result[0]['event'], 'my_event')
Example #40
def email_on_new_login(sender: Any, user: UserProfile, request: Any, **kwargs: Any) -> None:
    if not user.enable_login_emails:
        return
    # We import here to minimize the dependencies of this module,
    # since it runs as part of `manage.py` initialization
    from zerver.context_processors import common_context

    if not settings.SEND_LOGIN_EMAILS:
        return

    if request:
        # If the user's account was just created, avoid sending an email.
        if (timezone_now() - user.date_joined).total_seconds() <= JUST_CREATED_THRESHOLD:
            return

        user_agent = request.headers.get("User-Agent", "").lower()

        context = common_context(user)
        context["user_email"] = user.delivery_email
        user_tz = user.timezone
        if user_tz == "":
            user_tz = timezone_get_current_timezone_name()
        local_time = timezone_now().astimezone(pytz.timezone(user_tz))
        if user.twenty_four_hour_time:
            hhmm_string = local_time.strftime("%H:%M")
        else:
            hhmm_string = local_time.strftime("%I:%M%p")
        context["login_time"] = local_time.strftime(f"%A, %B %d, %Y at {hhmm_string} %Z")
        context["device_ip"] = request.META.get("REMOTE_ADDR") or _("Unknown IP address")
        context["device_os"] = get_device_os(user_agent) or _("an unknown operating system")
        context["device_browser"] = get_device_browser(user_agent) or _("An unknown browser")
        context["unsubscribe_link"] = one_click_unsubscribe_link(user, "login")

        email_dict = {
            "template_prefix": "zerver/emails/notify_new_login",
            "to_user_ids": [user.id],
            "from_name": FromAddress.security_email_from_name(user_profile=user),
            "from_address": FromAddress.NOREPLY,
            "context": context,
        }
        queue_json_publish("email_senders", email_dict)
Example #41
def json_report_error(request,
                      user_profile,
                      message=REQ(),
                      stacktrace=REQ(),
                      ui_message=REQ(validator=check_bool),
                      user_agent=REQ(),
                      href=REQ(),
                      log=REQ(),
                      more_info=REQ(validator=check_dict([]), default=None)):
    # type: (HttpRequest, UserProfile, text_type, text_type, bool, text_type, text_type, text_type, Dict[str, Any]) -> HttpResponse
    if not settings.ERROR_REPORTING:
        return json_success()

    if js_source_map:
        stacktrace = js_source_map.annotate_stacktrace(stacktrace)

    try:
        version = subprocess.check_output(
            ["git", "log", "HEAD^..HEAD", "--oneline"],
            universal_newlines=True)
    except Exception:
        version = None

    queue_json_publish(
        'error_reports',
        dict(type="browser",
             report=dict(
                 user_email=user_profile.email,
                 user_full_name=user_profile.full_name,
                 user_visible=ui_message,
                 server_path=settings.DEPLOY_ROOT,
                 version=version,
                 user_agent=user_agent,
                 href=href,
                 message=message,
                 stacktrace=stacktrace,
                 log=log,
                 more_info=more_info,
             )), lambda x: None)

    return json_success()
Example #42
    def handle(self, *args, **options):
        rcpt_to = os.environ.get("ORIGINAL_RECIPIENT", options['recipient'])
        if rcpt_to is not None:
            if is_missed_message_address(rcpt_to):
                try:
                    mark_missed_message_address_as_used(rcpt_to)
                except ZulipEmailForwardError:
                    print "5.1.1 Bad destination mailbox address: Bad or expired missed message address."
                    exit(posix.EX_NOUSER)
            else:
                try:
                    extract_and_validate(rcpt_to)
                except ZulipEmailForwardError:
                    print "5.1.1 Bad destination mailbox address: Please use the address specified in your Streams page."
                    exit(posix.EX_NOUSER)

            # Read in the message, at most 25MiB. This is the limit enforced by
            # Gmail, which we use here as a decent metric.
            message = sys.stdin.read(25 * 1024 * 1024)

            if len(sys.stdin.read(1)) != 0:
                # We're not at EOF, reject large mail.
                print "5.3.4 Message too big for system: Max size is 25MiB"
                exit(posix.EX_DATAERR)

            queue_json_publish("email_mirror", {
                "message": message,
                "rcpt_to": rcpt_to
            }, lambda x: None)
        else:
            # We're probably running from cron, try to batch-process mail
            if (not settings.EMAIL_GATEWAY_BOT
                    or not settings.EMAIL_GATEWAY_LOGIN
                    or not settings.EMAIL_GATEWAY_PASSWORD
                    or not settings.EMAIL_GATEWAY_IMAP_SERVER
                    or not settings.EMAIL_GATEWAY_IMAP_PORT
                    or not settings.EMAIL_GATEWAY_IMAP_FOLDER):
                print "Please configure the Email Mirror Gateway in your local_settings.py, or specify $ORIGINAL_RECIPIENT if piping a single mail."
                exit(1)
            reactor.callLater(0, main)
            reactor.run()
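The size check in this command uses a simple EOF probe: read exactly the limit, then attempt one more byte; any remaining data means the message is too large. The same pattern in isolation (the 25 MiB cap mirrors the snippet above):

import sys

MAX_MESSAGE_SIZE = 25 * 1024 * 1024  # 25 MiB, matching the Gmail-derived cap

def read_bounded_stdin(limit: int = MAX_MESSAGE_SIZE) -> str:
    data = sys.stdin.read(limit)
    if sys.stdin.read(1):
        # Not at EOF, so the input exceeds the limit.
        raise ValueError("message too big: max size is 25MiB")
    return data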
Example #43
0
def update_user_activity(request: HttpRequest, user_profile: UserProfile,
                         query: Optional[str]) -> None:
    # update_active_status also pushes to RabbitMQ, and it seems
    # redundant to log that here as well.
    if request.META["PATH_INFO"] == "/json/users/me/presence":
        return

    if query is not None:
        pass
    elif hasattr(request, "_query"):
        query = request._query
    else:
        query = request.META["PATH_INFO"]

    event = {
        "query": query,
        "user_profile_id": user_profile.id,
        "time": datetime_to_timestamp(timezone_now()),
        "client_id": request.client.id,
    }
    queue_json_publish("user_activity", event, lambda event: None)
Example #44
0
def email_on_new_login(sender: Any, user: UserProfile, request: Any, **kwargs: Any) -> None:
    if not user.enable_login_emails:
        return
    # We import here to minimize the dependencies of this module,
    # since it runs as part of `manage.py` initialization
    from zerver.context_processors import common_context

    if not settings.SEND_LOGIN_EMAILS:
        return

    if request:
        # If the user's account was just created, avoid sending an email.
        if (timezone_now() - user.date_joined).total_seconds() <= JUST_CREATED_THRESHOLD:
            return

        user_agent = request.META.get('HTTP_USER_AGENT', "").lower()

        context = common_context(user)
        context['user_email'] = user.email
        user_tz = user.timezone
        if user_tz == '':
            user_tz = timezone_get_current_timezone_name()
        local_time = timezone_now().astimezone(get_timezone(user_tz))
        if user.twenty_four_hour_time:
            hhmm_string = local_time.strftime('%H:%M')
        else:
            hhmm_string = local_time.strftime('%I:%M%p')
        context['login_time'] = local_time.strftime('%A, %B %d, %Y at {} %Z'.format(hhmm_string))
        context['device_ip'] = request.META.get('REMOTE_ADDR') or _("Unknown IP address")
        context['device_os'] = get_device_os(user_agent) or _("an unknown operating system")
        context['device_browser'] = get_device_browser(user_agent) or _("An unknown browser")
        context['unsubscribe_link'] = one_click_unsubscribe_link(user, 'login')

        email_dict = {
            'template_prefix': 'zerver/emails/notify_new_login',
            'to_user_ids': [user.id],
            'from_name': 'Zulip Account Security',
            'from_address': FromAddress.NOREPLY,
            'context': context}
        queue_json_publish("email_senders", email_dict)
Example #45
0
def maybe_enqueue_notifications(user_profile_id, message_id, private_message,
                                mentioned, stream_push_notify, stream_name,
                                always_push_notify, idle, already_notified):
    # type: (int, int, bool, bool, bool, Optional[str], bool, bool, Dict[str, bool]) -> Dict[str, bool]
    """This function has a complete unit test suite in
    `test_enqueue_notifications` that should be expanded as we add
    more features here."""
    notified = dict()  # type: Dict[str, bool]

    if (idle or always_push_notify) and (private_message or mentioned
                                         or stream_push_notify):
        notice = build_offline_notification(user_profile_id, message_id)
        if private_message:
            notice['trigger'] = 'private_message'
        elif mentioned:
            notice['trigger'] = 'mentioned'
        elif stream_push_notify:
            notice['trigger'] = 'stream_push_notify'
        else:
            raise AssertionError("Unknown notification trigger!")
        notice['stream_name'] = stream_name
        if not already_notified.get("push_notified"):
            queue_json_publish("missedmessage_mobile_notifications", notice,
                               lambda notice: None)
            notified['push_notified'] = True

    # Send missed_message emails if a private message or a
    # mention.  Eventually, we'll add settings to allow email
    # notifications to match the model of push notifications
    # above.
    if idle and (private_message or mentioned):
        # We require RabbitMQ to do this, as we can't call the email handler
        # from the Tornado process. So if there's no rabbitmq support do nothing
        if not already_notified.get("email_notified"):
            queue_json_publish("missedmessage_emails", notice,
                               lambda notice: None)
            notified['email_notified'] = True

    return notified
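A usage sketch for the function above: an idle user receiving a private message should come back with both flags set. The patch target is an assumption and must match the module that actually calls queue_json_publish; build_offline_notification runs unpatched.

from unittest import mock

with mock.patch("zerver.tornado.event_queue.queue_json_publish") as publish:
    notified = maybe_enqueue_notifications(
        user_profile_id=1, message_id=42, private_message=True,
        mentioned=False, stream_push_notify=False, stream_name=None,
        always_push_notify=False, idle=True, already_notified={})
assert notified == {'push_notified': True, 'email_notified': True}
assert publish.call_count == 2  # one publish per queue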
Example #46
0
def email_on_new_login(sender: Any, user: UserProfile, request: Any,
                       **kwargs: Any) -> None:
    # We import here to minimize the dependencies of this module,
    # since it runs as part of `manage.py` initialization
    from zerver.context_processors import common_context

    if not settings.SEND_LOGIN_EMAILS:
        return

    if request:
        # If the user's account was just created, avoid sending an email.
        if getattr(user, "just_registered", False):
            return

        login_time = timezone_now().strftime('%A, %B %d, %Y at %I:%M%p ') + \
            timezone_get_current_timezone_name()
        user_agent = request.META.get('HTTP_USER_AGENT', "").lower()
        device_browser = get_device_browser(user_agent)
        device_os = get_device_os(user_agent)
        device_ip = request.META.get('REMOTE_ADDR') or "Unknown IP address"
        device_info = {
            "device_browser": device_browser,
            "device_os": device_os,
            "device_ip": device_ip,
            "login_time": login_time
        }

        context = common_context(user)
        context['device_info'] = device_info
        context['user'] = user

        email_dict = {
            'template_prefix': 'zerver/emails/notify_new_login',
            'to_user_id': user.id,
            'from_name': 'Zulip Account Security',
            'from_address': FromAddress.NOREPLY,
            'context': context
        }
        queue_json_publish("email_senders", email_dict)
Example #47
0
def json_report_error(request, user_profile, message=REQ(), stacktrace=REQ(),
                      ui_message=REQ(validator=check_bool), user_agent=REQ(),
                      href=REQ(), log=REQ(),
                      more_info=REQ(validator=check_dict([]), default=None)):
    # type: (HttpRequest, UserProfile, Text, Text, bool, Text, Text, Text, Dict[str, Any]) -> HttpResponse
    """Accepts an error report and stores in a queue for processing.  The
    actual error reports are later handled by do_report_error (below)"""
    if not settings.BROWSER_ERROR_REPORTING:
        return json_success()

    js_source_map = get_js_source_map()
    if js_source_map:
        stacktrace = js_source_map.annotate_stacktrace(stacktrace)

    try:
        version = subprocess.check_output(["git", "log", "HEAD^..HEAD", "--oneline"],
                                          universal_newlines=True)  # type: Optional[Text]
    except Exception:
        version = None

    queue_json_publish('error_reports', dict(
        type="browser",
        report=dict(
            host=request.get_host().split(":")[0],
            user_email=user_profile.email,
            user_full_name=user_profile.full_name,
            user_visible=ui_message,
            server_path=settings.DEPLOY_ROOT,
            version=version,
            user_agent=user_agent,
            href=href,
            message=message,
            stacktrace=stacktrace,
            log=log,
            more_info=more_info,
        )
    ), lambda x: None)

    return json_success()
Example #48
0
    def consume(self, event):
        # type: (Mapping[str, Any]) -> None
        server_meta = event['server_meta']

        environ = {'REQUEST_METHOD': 'SOCKET',
                   'SCRIPT_NAME': '',
                   'PATH_INFO': '/json/messages',
                   'SERVER_NAME': 'localhost',
                   'SERVER_PORT': 9993,
                   'SERVER_PROTOCOL': 'ZULIP_SOCKET/1.0',
                   'wsgi.version': (1, 0),
                   'wsgi.input': StringIO(),
                   'wsgi.errors': sys.stderr,
                   'wsgi.multithread': False,
                   'wsgi.multiprocess': True,
                   'wsgi.run_once': False,
                   'zulip.emulated_method': 'POST'}
        # We're mostly using a WSGIRequest for convenience
        environ.update(server_meta['request_environ'])
        request = WSGIRequest(environ)
        request._request = event['request']
        request.csrf_processing_done = True

        user_profile = get_user_profile_by_id(server_meta['user_id'])
        request._cached_user = user_profile

        resp = self.handler.get_response(request)
        server_meta['time_request_finished'] = time.time()
        server_meta['worker_log_data'] = request._log_data

        resp_content = resp.content.decode('utf-8')
        result = {'response': ujson.loads(resp_content), 'req_id': event['req_id'],
                  'server_meta': server_meta}

        redis_key = req_redis_key(event['req_id'])
        self.redis_client.hmset(redis_key, {'status': 'complete',
                                            'response': resp_content})

        queue_json_publish(server_meta['return_queue'], result, lambda e: None)
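The other half of this round trip is a caller polling Redis for the completed response. A hypothetical sketch (this polling helper is not part of the snippet above):

import time

import redis

def wait_for_response(client: redis.Redis, redis_key: str,
                      timeout: float = 5.0) -> bytes:
    # Poll the hash written by consume() until the worker marks it complete.
    deadline = time.time() + timeout
    while time.time() < deadline:
        data = client.hgetall(redis_key)
        if data.get(b'status') == b'complete':
            return data[b'response']
        time.sleep(0.05)
    raise TimeoutError("no response for %s" % (redis_key,))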
Example #49
0
    def consume(self, event):
        server_meta = event["server_meta"]

        environ = {
            "REQUEST_METHOD": "SOCKET",
            "SCRIPT_NAME": "",
            "PATH_INFO": "/json/messages",
            "SERVER_NAME": "localhost",
            "SERVER_PORT": 9993,
            "SERVER_PROTOCOL": "ZULIP_SOCKET/1.0",
            "wsgi.version": (1, 0),
            "wsgi.input": StringIO(),
            "wsgi.errors": sys.stderr,
            "wsgi.multithread": False,
            "wsgi.multiprocess": True,
            "wsgi.run_once": False,
            "zulip.emulated_method": "POST",
        }
        # We're mostly using a WSGIRequest for convenience
        environ.update(server_meta["request_environ"])
        request = WSGIRequest(environ)
        request._request = event["request"]
        request.csrf_processing_done = True

        user_profile = get_user_profile_by_id(server_meta["user_id"])
        request._cached_user = user_profile

        resp = self.handler.get_response(request)
        server_meta["time_request_finished"] = time.time()
        server_meta["worker_log_data"] = request._log_data

        resp_content = resp.content
        result = {"response": ujson.loads(resp_content), "req_id": event["req_id"], "server_meta": server_meta}

        redis_key = req_redis_key(event["req_id"])
        self.redis_client.hmset(redis_key, {"status": "complete", "response": resp_content})

        queue_json_publish(server_meta["return_queue"], result, lambda e: None)
Example #50
0
    def test_queue_error_json(self) -> None:
        queue_client = get_queue_client()
        actual_publish = queue_client.publish

        self.counter = 0

        def throw_connection_error_once(self_obj: Any, *args: Any,
                                        **kwargs: Any) -> None:
            self.counter += 1
            if self.counter <= 1:
                raise AMQPConnectionError("test")
            actual_publish(*args, **kwargs)

        with mock.patch("zerver.lib.queue.SimpleQueueClient.publish",
                        throw_connection_error_once), self.assertLogs('zulip.queue', level='WARN') as warn_logs:
            queue_json_publish("test_suite", {"event": "my_event"})
        self.assertEqual(warn_logs.output, [
            'WARNING:zulip.queue:Failed to send to rabbitmq, trying to reconnect and send again'
        ])

        result = queue_client.json_drain_queue("test_suite")
        self.assertEqual(len(result), 1)
        self.assertEqual(result[0]['event'], 'my_event')
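The behavior this test pins down — swallow one AMQPConnectionError, log a warning, reconnect, and publish again — can be sketched independently. A minimal illustration of the retry pattern, not Zulip's actual implementation:

import logging

from pika.exceptions import AMQPConnectionError

logger = logging.getLogger('zulip.queue')

def publish_with_one_retry(get_channel, queue_name: str, body: bytes) -> None:
    # get_channel should return a usable channel, reconnecting if needed.
    try:
        get_channel().basic_publish(exchange='', routing_key=queue_name, body=body)
    except AMQPConnectionError:
        logger.warning('Failed to send to rabbitmq, trying to reconnect and send again')
        get_channel().basic_publish(exchange='', routing_key=queue_name, body=body)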
Example #51
0
        def wrapped_queue_json_publish(*args: Any, **kwargs: Any) -> None:
            self.create_mock_response(original_url)
            self.create_mock_response(edited_url)

            with self.settings(TEST_SUITE=False):
                with self.assertLogs(level="INFO") as info_logs:
                    # Run the queue processor. This will simulate the event for original_url being
                    # processed after the message has been edited.
                    FetchLinksEmbedData().consume(event)
            self.assertTrue(
                "INFO:root:Time spent on get_link_embed_data for http://test.org/: "
                in info_logs.output[0]
            )
            msg = Message.objects.select_related("sender").get(id=msg_id)
            # The content of the message has changed since the event for original_url has been created,
            # it should not be rendered. Another, up-to-date event will have been sent (edited_url).
            self.assertNotIn(
                f'<a href="{original_url}" title="The Rock">The Rock</a>', msg.rendered_content
            )

            self.assertTrue(responses.assert_call_count(edited_url, 0))

            with self.settings(TEST_SUITE=False):
                with self.assertLogs(level="INFO") as info_logs:
                    # Now proceed with the original queue_json_publish and call the
                    # up-to-date event for edited_url.
                    queue_json_publish(*args, **kwargs)
                    msg = Message.objects.select_related("sender").get(id=msg_id)
                    assert msg.rendered_content is not None
                    self.assertIn(
                        f'<a href="{edited_url}" title="The Rock">The Rock</a>',
                        msg.rendered_content,
                    )
            self.assertTrue(
                "INFO:root:Time spent on get_link_embed_data for http://edited.org/: "
                in info_logs.output[0]
            )
Example #52
0
    def test_queue_error_json(self) -> None:
        queue_client = get_queue_client()
        assert isinstance(queue_client, SimpleQueueClient)
        actual_publish = queue_client.publish

        self.counter = 0

        def throw_connection_error_once(self_obj: Any, *args: Any,
                                        **kwargs: Any) -> None:
            self.counter += 1
            if self.counter <= 1:
                raise AMQPConnectionError("test")
            actual_publish(*args, **kwargs)

        with mock.patch("zerver.lib.queue.SimpleQueueClient.publish",
                        throw_connection_error_once), self.assertLogs(
                            "zulip.queue", level="WARN") as warn_logs:
            queue_json_publish("test_suite", {"event": "my_event"})
        self.assertEqual(
            warn_logs.output,
            [
                "WARNING:zulip.queue:Failed to send to rabbitmq, trying to reconnect and send again"
            ],
        )

        assert queue_client.channel
        method, header, message = queue_client.channel.basic_get("test_suite")
        assert method is not None
        assert method.delivery_tag is not None
        assert message is not None
        queue_client.channel.basic_ack(method.delivery_tag)
        result = orjson.loads(message)
        self.assertEqual(result["event"], "my_event")

        method, header, message = queue_client.channel.basic_get("test_suite")
        assert message is None
Example #53
0
def maybe_enqueue_notifications(user_profile_id, message_id, private_message,
                                mentioned, stream_push_notify, stream_name,
                                always_push_notify, idle, already_notified):
    # type: (int, int, bool, bool, bool, Optional[str], bool, bool, Dict[str, bool]) -> Dict[str, bool]
    """This function has a complete unit test suite in
    `test_enqueue_notifications` that should be expanded as we add
    more features here."""
    notified = dict()  # type: Dict[str, bool]

    if (idle or always_push_notify) and (private_message or mentioned or stream_push_notify):
        notice = build_offline_notification(user_profile_id, message_id)
        if private_message:
            notice['trigger'] = 'private_message'
        elif mentioned:
            notice['trigger'] = 'mentioned'
        elif stream_push_notify:
            notice['trigger'] = 'stream_push_notify'
        else:
            raise AssertionError("Unknown notification trigger!")
        notice['stream_name'] = stream_name
        if not already_notified.get("push_notified"):
            queue_json_publish("missedmessage_mobile_notifications", notice, lambda notice: None)
            notified['push_notified'] = True

    # Send missed_message emails if a private message or a
    # mention.  Eventually, we'll add settings to allow email
    # notifications to match the model of push notifications
    # above.
    if idle and (private_message or mentioned):
        # We require RabbitMQ to do this, as we can't call the email handler
        # from the Tornado process. So if there's no rabbitmq support do nothing
        if not already_notified.get("email_notified"):
            queue_json_publish("missedmessage_emails", notice, lambda notice: None)
            notified['email_notified'] = True

    return notified
Example #54
0
def set_emoji_author(apps: StateApps,
                     schema_editor: DatabaseSchemaEditor) -> None:
    """
    This migration establishes the invariant that all RealmEmoji objects have .author set
    and queues events for reuploading all RealmEmoji.
    """

    RealmEmoji = apps.get_model("zerver", "RealmEmoji")
    Realm = apps.get_model("zerver", "Realm")
    UserProfile = apps.get_model("zerver", "UserProfile")
    ROLE_REALM_OWNER = 100

    realm_emoji_to_update = []
    for realm_emoji in RealmEmoji.objects.all():
        if realm_emoji.author_id is None:
            user_profile = (UserProfile.objects.filter(
                realm_id=realm_emoji.realm_id,
                is_active=True,
                role=ROLE_REALM_OWNER).order_by("id").first())
            realm_emoji.author_id = user_profile.id
            realm_emoji_to_update.append(realm_emoji)

    RealmEmoji.objects.bulk_update(realm_emoji_to_update, ["author_id"])

    if settings.TEST_SUITE:
        # There are no custom emoji in the test suite data set, and
        # the below code won't work because RabbitMQ isn't enabled for
        # the test suite.
        return

    for realm_id in Realm.objects.order_by("id").values_list("id", flat=True):
        event = {
            "type": "reupload_realm_emoji",
            "realm_id": realm_id,
        }
        queue_json_publish("deferred_work", event)
Example #55
0
def send_notification(data):
    # type: (Mapping[str, Any]) -> None
    queue_json_publish("notify_tornado", data, send_notification_http)
Example #56
0
def process_message_event(event_template, users):
    # type: (Mapping[str, Any], Iterable[Mapping[str, Any]]) -> None
    missed_message_userids = set(event_template.get('missed_message_userids', []))
    sender_queue_id = event_template.get('sender_queue_id', None)  # type: Optional[str]
    message_dict_markdown = event_template['message_dict_markdown']  # type: Dict[str, Any]
    message_dict_no_markdown = event_template['message_dict_no_markdown']  # type: Dict[str, Any]
    sender_id = message_dict_markdown['sender_id']  # type: int
    message_id = message_dict_markdown['id']  # type: int
    message_type = message_dict_markdown['type']  # type: str
    sending_client = message_dict_markdown['client']  # type: Text

    # To remove duplicate clients: Maps queue ID to {'client': Client, 'flags': flags}
    send_to_clients = {}  # type: Dict[str, Dict[str, Any]]

    # Extra user-specific data to include
    extra_user_data = {}  # type: Dict[int, Any]

    if 'stream_name' in event_template and not event_template.get("invite_only"):
        for client in get_client_descriptors_for_realm_all_streams(event_template['realm_id']):
            send_to_clients[client.event_queue.id] = {'client': client, 'flags': None}
            if sender_queue_id is not None and client.event_queue.id == sender_queue_id:
                send_to_clients[client.event_queue.id]['is_sender'] = True

    for user_data in users:
        user_profile_id = user_data['id']  # type: int
        flags = user_data.get('flags', [])  # type: Iterable[str]

        for client in get_client_descriptors_for_user(user_profile_id):
            send_to_clients[client.event_queue.id] = {'client': client, 'flags': flags}
            if sender_queue_id is not None and client.event_queue.id == sender_queue_id:
                send_to_clients[client.event_queue.id]['is_sender'] = True

        # If the recipient was offline and the message was a single or group
        # PM to them, or they were @-notified, we may want to notify them
        # more immediately.
        private_message = message_type == "private" and user_profile_id != sender_id
        mentioned = 'mentioned' in flags
        stream_push_notify = user_data.get('stream_push_notify', False)

        # We first check if a message is potentially mentionable,
        # since receiver_is_off_zulip is somewhat expensive.
        if private_message or mentioned or stream_push_notify:
            idle = receiver_is_off_zulip(user_profile_id) or (user_profile_id in missed_message_userids)
            always_push_notify = user_data.get('always_push_notify', False)
            notified = dict()  # type: Dict[str, bool]

            if (idle or always_push_notify) and (private_message or mentioned or stream_push_notify):
                notice = build_offline_notification(user_profile_id, message_id)
                notice['triggers'] = {
                    'private_message': private_message,
                    'mentioned': mentioned,
                    'stream_push_notify': stream_push_notify,
                }
                notice['stream_name'] = event_template.get('stream_name')
                queue_json_publish("missedmessage_mobile_notifications", notice, lambda notice: None)
                notified['push_notified'] = True

            # Send missed_message emails if a private message or a
            # mention.  Eventually, we'll add settings to allow email
            # notifications to match the model of push notifications
            # above.
            if idle and (private_message or mentioned):
                # We require RabbitMQ to do this, as we can't call the email handler
                # from the Tornado process. So if there's no rabbitmq support do nothing
                queue_json_publish("missedmessage_emails", notice, lambda notice: None)
                notified['email_notified'] = True

            if len(notified) > 0:
                extra_user_data[user_profile_id] = notified

    for client_data in six.itervalues(send_to_clients):
        client = client_data['client']
        flags = client_data['flags']
        is_sender = client_data.get('is_sender', False)  # type: bool
        extra_data = extra_user_data.get(client.user_profile_id, None)  # type: Optional[Mapping[str, bool]]

        if not client.accepts_messages():
            # The actual check is the accepts_event() check below;
            # this line is just an optimization to avoid copying
            # message data unnecessarily
            continue

        if client.apply_markdown:
            message_dict = message_dict_markdown
        else:
            message_dict = message_dict_no_markdown

        # Make sure Zephyr mirroring bots know whether stream is invite-only
        if "mirror" in client.client_type_name and event_template.get("invite_only"):
            message_dict = message_dict.copy()
            message_dict["invite_only_stream"] = True

        if flags is not None:
            message_dict['is_mentioned'] = 'mentioned' in flags
        user_event = dict(type='message', message=message_dict, flags=flags)  # type: Dict[str, Any]
        if extra_data is not None:
            user_event.update(extra_data)

        if is_sender:
            local_message_id = event_template.get('local_id', None)
            if local_message_id is not None:
                user_event["local_message_id"] = local_message_id

        if not client.accepts_event(user_event):
            continue

        # The below prevents (Zephyr) mirroring loops.
        if ('mirror' in sending_client and
                sending_client.lower() == client.client_type_name.lower()):
            continue
        client.add_event(user_event)
Example #57
0
def queue_digest_recipient(user_profile, cutoff):
    # Convert cutoff to epoch seconds for transit.
    event = {"user_profile_id": user_profile.id,
             "cutoff": cutoff.strftime('%s')}
    queue_json_publish("digest_emails", event, lambda event: None)
Example #58
0
def queue_digest_recipient(user_profile: UserProfile, cutoff: datetime.datetime) -> None:
    # Convert cutoff to epoch seconds for transit.
    event = {"user_profile_id": user_profile.id,
             "cutoff": cutoff.strftime('%s')}
    queue_json_publish("digest_emails", event)
Example #59
0
def report_error(
    request: HttpRequest,
    user_profile: UserProfile,
    message: str = REQ(),
    stacktrace: str = REQ(),
    ui_message: bool = REQ(validator=check_bool),
    user_agent: str = REQ(),
    href: str = REQ(),
    log: str = REQ(),
    more_info: Optional[Dict[str, Any]] = REQ(validator=check_dict([]),
                                              default=None)
) -> HttpResponse:
    """Accepts an error report and stores in a queue for processing.  The
    actual error reports are later handled by do_report_error (below)"""
    if not settings.BROWSER_ERROR_REPORTING:
        return json_success()
    if more_info is None:
        more_info = {}

    js_source_map = get_js_source_map()
    if js_source_map:
        stacktrace = js_source_map.annotate_stacktrace(stacktrace)

    try:
        version = subprocess.check_output(
            ["git", "log", "HEAD^..HEAD", "--oneline"],
            universal_newlines=True)  # type: Optional[str]
    except Exception:
        version = None

    # Get the IP address of the request
    remote_ip = request.META.get('HTTP_X_REAL_IP')
    if remote_ip is None:
        remote_ip = request.META['REMOTE_ADDR']

    # For the privacy of our users, we remove any actual text content
    # in draft_content (from drafts rendering exceptions).  See the
    # comment on privacy_clean_markdown for more details.
    if more_info.get('draft_content'):
        more_info['draft_content'] = privacy_clean_markdown(
            more_info['draft_content'])

    queue_json_publish(
        'error_reports',
        dict(type="browser",
             report=dict(
                 host=request.get_host().split(":")[0],
                 ip_address=remote_ip,
                 user_email=user_profile.email,
                 user_full_name=user_profile.full_name,
                 user_visible=ui_message,
                 server_path=settings.DEPLOY_ROOT,
                 version=version,
                 user_agent=user_agent,
                 href=href,
                 message=message,
                 stacktrace=stacktrace,
                 log=log,
                 more_info=more_info,
             )))

    return json_success()