Example #1
 def _handle_consume_exception(self, events: List[Dict[str, Any]],
                               exception: Exception) -> None:
     with configure_scope() as scope:
         scope.set_context("events", {
             "data": events,
             "queue_name": self.queue_name,
         })
         if isinstance(exception, WorkerTimeoutException):
             with sentry_sdk.push_scope() as scope:
                 scope.fingerprint = ['worker-timeout', self.queue_name]
                 logging.exception("%s in queue %s",
                                   str(exception),
                                   self.queue_name,
                                   stack_info=True)
         else:
             logging.exception("Problem handling data on queue %s",
                               self.queue_name,
                               stack_info=True)
     if not os.path.exists(settings.QUEUE_ERROR_DIR):
         os.mkdir(settings.QUEUE_ERROR_DIR)  # nocoverage
     # Use 'mark_sanitized' to prevent Pysa from detecting this false positive
     # flow. 'queue_name' is always a constant string.
     fname = mark_sanitized(f'{self.queue_name}.errors')
     fn = os.path.join(settings.QUEUE_ERROR_DIR, fname)
     line = f'{time.asctime()}\t{orjson.dumps(events).decode()}\n'
     lock_fn = fn + '.lock'
     with lockfile(lock_fn):
         with open(fn, 'a') as f:
             f.write(line)
     check_and_send_restart_signal()
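All of these examples enter a lockfile(...) context manager before touching a shared file, but none of them show its implementation. As a point of reference only, a minimal sketch of such a helper on POSIX, assuming an fcntl/flock-based design (the project's real helper may differ):

    import fcntl
    from contextlib import contextmanager
    from typing import Iterator

    @contextmanager
    def lockfile(filename: str) -> Iterator[None]:
        # Hold an exclusive advisory lock for the duration of the block,
        # so concurrent workers serialize their writes to the shared file.
        with open(filename, 'w') as lock:
            fcntl.flock(lock, fcntl.LOCK_EX)
            try:
                yield
            finally:
                fcntl.flock(lock, fcntl.LOCK_UN)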
Example #2
    def handle(self, *args, **options):
        # type: (*Any, **Any) -> None
        # TODO: this only acquires a lock on the system, not on the DB:
        # be careful not to run this on multiple systems.

        # In the meantime, we have an option to prevent this job from
        # running on >1 machine
        if settings.EMAIL_DELIVERER_DISABLED:
            return

        with lockfile("/tmp/zulip_email_deliver.lockfile"):
            while True:
                email_jobs_to_deliver = ScheduledEmail.objects.filter(
                    scheduled_timestamp__lte=timezone_now())
                if email_jobs_to_deliver:
                    for job in email_jobs_to_deliver:
                        try:
                            send_email(**loads(job.data))
                            job.delete()
                        except EmailNotDeliveredException:
                            logger.warning("%r not delivered" % (job,))
                    time.sleep(10)
                else:
                    # Less load on the db during times of activity, and more responsiveness when the load is low
                    time.sleep(2)
Example #3
    def update_statistics(self, remaining_local_queue_size: int) -> None:
        total_seconds = sum(seconds
                            for _, seconds in self.recent_consume_times)
        total_events = sum(events_number
                           for events_number, _ in self.recent_consume_times)
        if total_events == 0:
            recent_average_consume_time = None
        else:
            recent_average_consume_time = total_seconds / total_events
        stats_dict = dict(
            update_time=time.time(),
            recent_average_consume_time=recent_average_consume_time,
            current_queue_size=remaining_local_queue_size,
            queue_last_emptied_timestamp=self.queue_last_emptied_timestamp,
            consumed_since_last_emptied=self.consumed_since_last_emptied,
        )

        os.makedirs(settings.QUEUE_STATS_DIR, exist_ok=True)

        fname = f"{self.queue_name}.stats"
        fn = os.path.join(settings.QUEUE_STATS_DIR, fname)
        with lockfile(fn + ".lock"):
            tmp_fn = fn + ".tmp"
            with open(tmp_fn, "wb") as f:
                f.write(
                    orjson.dumps(stats_dict,
                                 option=orjson.OPT_APPEND_NEWLINE
                                 | orjson.OPT_INDENT_2))
            os.rename(tmp_fn, fn)
        self.last_statistics_update_time = time.time()
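The writer above takes the .lock file and then swaps the new stats document in with os.rename(), which replaces fn atomically on POSIX. A hypothetical reader (not part of the examples) can therefore load the file without taking the lock and still never observe a half-written document:

    import orjson

    def read_queue_stats(fn: str) -> dict:
        # The writer replaces fn via an atomic os.rename(), so a plain
        # read always sees either the old or the new stats, never a mix.
        with open(fn, 'rb') as f:
            return orjson.loads(f.read())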
Example #4
    def _handle_consume_exception(self, events: List[Dict[str, Any]],
                                  exception: Exception) -> None:
        if isinstance(exception, InterruptConsumeException):
            # The exception signals that no further error handling
            # is needed and the worker can proceed.
            return

        with configure_scope() as scope:
            scope.set_context(
                "events",
                {
                    "data": events,
                    "queue_name": self.queue_name,
                },
            )
            if isinstance(exception, WorkerTimeoutException):
                with sentry_sdk.push_scope() as scope:
                    scope.fingerprint = ["worker-timeout", self.queue_name]
                    logging.exception(exception, stack_info=True)
            else:
                logging.exception("Problem handling data on queue %s",
                                  self.queue_name,
                                  stack_info=True)
        if not os.path.exists(settings.QUEUE_ERROR_DIR):
            os.mkdir(settings.QUEUE_ERROR_DIR)  # nocoverage
        # Use 'mark_sanitized' to prevent Pysa from detecting this false positive
        # flow. 'queue_name' is always a constant string.
        fname = mark_sanitized(f"{self.queue_name}.errors")
        fn = os.path.join(settings.QUEUE_ERROR_DIR, fname)
        line = f"{time.asctime()}\t{orjson.dumps(events).decode()}\n"
        lock_fn = fn + ".lock"
        with lockfile(lock_fn):
            with open(fn, "a") as f:
                f.write(line)
        check_and_send_restart_signal()
Example #5
    def handle(self, *args: Any, **options: Any) -> None:

        if settings.EMAIL_DELIVERER_DISABLED:
            sleep_forever()

        with lockfile("/tmp/zulip_email_deliver.lockfile"):
            while True:
                email_jobs_to_deliver = ScheduledEmail.objects.filter(
                    scheduled_timestamp__lte=timezone_now())
                if email_jobs_to_deliver:
                    for job in email_jobs_to_deliver:
                        data = loads(job.data)
                        # Reformat any jobs that used the old to_email
                        # and to_user_id argument formats.
                        if 'to_email' in data:
                            data['to_emails'] = [data['to_email']]
                            del data['to_email']
                        if 'to_user_id' in data:
                            data['to_user_ids'] = [data['to_user_id']]
                            del data['to_user_id']
                        try:
                            send_email(**data)
                            job.delete()
                        except EmailNotDeliveredException:
                            logger.warning("%r not delivered" % (job, ))
                    time.sleep(10)
                else:
                    # Less load on the db during times of activity,
                    # and more responsiveness when the load is low
                    time.sleep(2)
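Several variants of this command call sleep_forever() when the deliverer is disabled, and none of them define it. Judging from the inline loops in the older versions below (Examples #6, #19, and #20), it is plausibly just a parked loop; a sketch under that assumption:

    import time

    def sleep_forever() -> None:
        # Park the process instead of returning, so that a process
        # supervisor does not repeatedly restart a deliberately idle job.
        while True:
            time.sleep(10 ** 9)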
Example #6
    def handle(self, *args: Any, **options: Any) -> None:

        if settings.EMAIL_DELIVERER_DISABLED:
            # Here doing a check and sleeping indefinitely on this setting might
            # not sound right. Actually we do this check to avoid running this
            # process on every server that might be in service to a realm. See
            # the comment in zproject/settings.py file about renaming this setting.
            while True:
                time.sleep(10 ** 9)  # "infinity" seconds

        with lockfile("/tmp/zulip_scheduled_message_deliverer.lockfile"):
            while True:
                messages_to_deliver = ScheduledMessage.objects.filter(
                    scheduled_timestamp__lte=timezone_now(), delivered=False)
                if messages_to_deliver:
                    for message in messages_to_deliver:
                        with transaction.atomic():
                            do_send_messages([self.construct_message(message)])
                            message.delivered = True
                            message.save(update_fields=['delivered'])

                cur_time = timezone_now()
                time_next_min = (cur_time + timedelta(minutes=1)).replace(
                    second=0, microsecond=0)
                sleep_time = (time_next_min - cur_time).total_seconds()
                time.sleep(sleep_time)
Example #7
    def update_statistics(self, remaining_queue_size: int) -> None:
        total_seconds = sum(
            [seconds for _, seconds in self.recent_consume_times])
        total_events = sum(
            [events_number for events_number, _ in self.recent_consume_times])
        if total_events == 0:
            recent_average_consume_time = None
        else:
            recent_average_consume_time = total_seconds / total_events
        stats_dict = dict(
            update_time=time.time(),
            recent_average_consume_time=recent_average_consume_time,
            current_queue_size=remaining_queue_size,
            queue_last_emptied_timestamp=self.queue_last_emptied_timestamp,
            consumed_since_last_emptied=self.consumed_since_last_emptied,
        )

        os.makedirs(settings.QUEUE_STATS_DIR, exist_ok=True)

        fname = '%s.stats' % (self.queue_name, )
        fn = os.path.join(settings.QUEUE_STATS_DIR, fname)
        with lockfile(fn + '.lock'):
            tmp_fn = fn + '.tmp'
            with open(tmp_fn, 'w') as f:
                serialized_dict = ujson.dumps(stats_dict, indent=2)
                serialized_dict += '\n'
                f.write(serialized_dict)
            os.rename(tmp_fn, fn)
Example #8
    def handle(self, *args, **options):
        # type: (*Any, **Any) -> None
        # TODO: this only acquires a lock on the system, not on the DB:
        # be careful not to run this on multiple systems.

        # In the meantime, we have an option to prevent this job from
        # running on >1 machine
        if settings.EMAIL_DELIVERER_DISABLED:
            return

        with lockfile("/tmp/zulip_email_deliver.lockfile"):
            while True:
                # Select with utcnow(): scheduled timestamps are stored
                # with utcnow(), so filtering with now() would compare
                # against the wrong clock.
                email_jobs_to_deliver = ScheduledJob.objects.filter(
                    type=ScheduledJob.EMAIL,
                    scheduled_timestamp__lte=datetime.utcnow())
                if email_jobs_to_deliver:
                    for job in email_jobs_to_deliver:
                        if not send_email_job(job):
                            logger.warning(
                                "No exception raised, but %r sent as 0 bytes" %
                                (job, ))
                        else:
                            job.delete()
                    time.sleep(10)
                else:
                    # Less load on the db during times of activity, and more responsiveness when the load is low
                    time.sleep(2)
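The comment about utcnow() points at a real pitfall: rows written with a naive UTC timestamp must also be queried with one. A short standard-library illustration of the mismatch:

    from datetime import datetime, timezone

    naive_utc = datetime.utcnow()           # naive timestamp, implicitly UTC
    aware_utc = datetime.now(timezone.utc)  # timezone-aware UTC timestamp
    # Comparing the two raises TypeError ("can't compare offset-naive and
    # offset-aware datetimes"), while a naive local now() would compare
    # cleanly but against the wrong clock.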
Example #9
    def handle(self, *args: Any, **options: Any) -> None:

        if settings.EMAIL_DELIVERER_DISABLED:
            # Here doing a check and sleeping indefinitely on this setting might
            # not sound right. Actually we do this check to avoid running this
            # process on every server that might be in service to a realm. See
            # the comment in zproject/settings.py file about renaming this setting.
            sleep_forever()

        with lockfile("/tmp/zulip_scheduled_message_deliverer.lockfile"):
            while True:
                messages_to_deliver = ScheduledMessage.objects.filter(
                    scheduled_timestamp__lte=timezone_now(),
                    delivered=False)
                if messages_to_deliver:
                    for message in messages_to_deliver:
                        with transaction.atomic():
                            do_send_messages([self.construct_message(message)])
                            message.delivered = True
                            message.save(update_fields=['delivered'])

                cur_time = timezone_now()
                time_next_min = (cur_time + timedelta(minutes=1)).replace(second=0, microsecond=0)
                sleep_time = (time_next_min - cur_time).total_seconds()
                time.sleep(sleep_time)
Example #10
    def handle(self, *args, **options):
        # type: (*Any, **Any) -> None
        # TODO: this only acquires a lock on the system, not on the DB:
        # be careful not to run this on multiple systems.

        # In the meantime, we have an option to prevent this job from
        # running on >1 machine
        if settings.EMAIL_DELIVERER_DISABLED:
            return

        with lockfile("/tmp/zulip_email_deliver.lockfile"):
            while True:
                # Select with utcnow(): scheduled timestamps are stored
                # with utcnow(), so filtering with now() would compare
                # against the wrong clock.
                email_jobs_to_deliver = ScheduledJob.objects.filter(type=ScheduledJob.EMAIL,
                                                                    scheduled_timestamp__lte=datetime.utcnow())
                if email_jobs_to_deliver:
                    for job in email_jobs_to_deliver:
                        if not send_email_job(job):
                            logger.warning("No exception raised, but %r sent as 0 bytes" % (job,))
                        else:
                            job.delete()
                    time.sleep(10)
                else:
                    # Less load on the db during times of activity, and more responsiveness when the load is low
                    time.sleep(2)
Example #11
 def _handle_consume_exception(self, events: List[Dict[str, Any]]) -> None:
     self._log_problem()
     if not os.path.exists(settings.QUEUE_ERROR_DIR):
         os.mkdir(settings.QUEUE_ERROR_DIR)  # nocoverage
     fname = '%s.errors' % (self.queue_name, )
     fn = os.path.join(settings.QUEUE_ERROR_DIR, fname)
     line = '%s\t%s\n' % (time.asctime(), ujson.dumps(events))
     lock_fn = fn + '.lock'
     with lockfile(lock_fn):
         with open(fn, 'ab') as f:
             f.write(line.encode('utf-8'))
     check_and_send_restart_signal()
Example #12
 def _handle_consume_exception(self, events: List[Dict[str, Any]]) -> None:
     self._log_problem()
     if not os.path.exists(settings.QUEUE_ERROR_DIR):
         os.mkdir(settings.QUEUE_ERROR_DIR)  # nocoverage
     # Use 'mark_sanitized' to prevent Pysa from detecting this false positive
     # flow. 'queue_name' is always a constant string.
     fname = mark_sanitized(f'{self.queue_name}.errors')
     fn = os.path.join(settings.QUEUE_ERROR_DIR, fname)
     line = f'{time.asctime()}\t{orjson.dumps(events).decode()}\n'
     lock_fn = fn + '.lock'
     with lockfile(lock_fn):
         with open(fn, 'ab') as f:
             f.write(line.encode('utf-8'))
     check_and_send_restart_signal()
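The mark_sanitized calls in Examples #1, #4, and #12 exist purely for the Pysa taint analyzer; at runtime such a helper is typically an identity function. A sketch under that assumption (the real helper may carry Pysa-specific annotations):

    from typing import TypeVar

    T = TypeVar('T')

    def mark_sanitized(value: T) -> T:
        # No-op at runtime; exists so Pysa treats the value as sanitized.
        return value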
Example #13
 def consume_wrapper(self, data):
     try:
         self.consume(data)
     except Exception:
         self._log_problem()
         if not os.path.exists(settings.QUEUE_ERROR_DIR):
             os.mkdir(settings.QUEUE_ERROR_DIR)
         fname = '%s.errors' % (self.queue_name, )
         fn = os.path.join(settings.QUEUE_ERROR_DIR, fname)
         line = '%s\t%s\n' % (time.asctime(), ujson.dumps(data))
         lock_fn = fn + '.lock'
         with lockfile(lock_fn):
             with open(fn, 'a') as f:
                 f.write(line)
     reset_queries()
Example #14
 def consume_wrapper(self, data):
     try:
         self.consume(data)
     except Exception:
         self._log_problem()
         if not os.path.exists(settings.QUEUE_ERROR_DIR):
             os.mkdir(settings.QUEUE_ERROR_DIR)
         fname = '%s.errors' % (self.queue_name,)
         fn = os.path.join(settings.QUEUE_ERROR_DIR, fname)
         line = u'%s\t%s\n' % (time.asctime(), ujson.dumps(data))
         lock_fn = fn + '.lock'
         with lockfile(lock_fn):
             with open(fn, 'ab') as f:
                 f.write(line.encode('utf-8'))
     reset_queries()
Example #15
 def consume_wrapper(self, data: Dict[str, Any]) -> None:
     try:
         self.consume(data)
     except Exception:
         self._log_problem()
         if not os.path.exists(settings.QUEUE_ERROR_DIR):
             os.mkdir(settings.QUEUE_ERROR_DIR)  # nocoverage
         fname = '%s.errors' % (self.queue_name,)
         fn = os.path.join(settings.QUEUE_ERROR_DIR, fname)
         line = '%s\t%s\n' % (time.asctime(), ujson.dumps(data))
         lock_fn = fn + '.lock'
         with lockfile(lock_fn):
             with open(fn, 'ab') as f:
                 f.write(line.encode('utf-8'))
         check_and_send_restart_signal()
     finally:
         reset_queries()
Example #16
 def consume_wrapper(self, data: Dict[str, Any]) -> None:
     try:
         self.consume(data)
     except Exception:
         self._log_problem()
         if not os.path.exists(settings.QUEUE_ERROR_DIR):
             os.mkdir(settings.QUEUE_ERROR_DIR)  # nocoverage
         fname = '%s.errors' % (self.queue_name, )
         fn = os.path.join(settings.QUEUE_ERROR_DIR, fname)
         line = '%s\t%s\n' % (time.asctime(), ujson.dumps(data))
         lock_fn = fn + '.lock'
         with lockfile(lock_fn):
             with open(fn, 'ab') as f:
                 f.write(line.encode('utf-8'))
         check_and_send_restart_signal()
     finally:
         reset_queries()
Example #17
    def handle(self, *args: Any, **options: Any) -> None:
        with lockfile("/tmp/zulip_scheduled_message_deliverer.lockfile"):
            while True:
                messages_to_deliver = ScheduledMessage.objects.filter(
                    scheduled_timestamp__lte=timezone_now(), delivered=False)
                if messages_to_deliver:
                    for message in messages_to_deliver:
                        with transaction.atomic():
                            do_send_messages([self.construct_message(message)])
                            message.delivered = True
                            message.save(update_fields=['delivered'])

                cur_time = timezone_now()
                time_next_min = (cur_time + timedelta(minutes=1)).replace(
                    second=0, microsecond=0)
                sleep_time = (time_next_min - cur_time).total_seconds()
                time.sleep(sleep_time)
Example #18
    def handle(self, *args: Any, **options: Any) -> None:
        if not settings.BILLING_PROCESSOR_ENABLED:
            sleep_forever()

        with lockfile("/tmp/zulip_billing_processor.lockfile"):
            while True:
                for processor in BillingProcessor.objects.exclude(
                        state=BillingProcessor.STALLED):
                    try:
                        entry_processed = run_billing_processor_one_step(processor)
                    except StripeConnectionError:
                        time.sleep(5 * 60)
                        continue
                    # Less load on the db during times of activity
                    # and more responsiveness when the load is low
                    if entry_processed:
                        time.sleep(10)
                    else:
                        time.sleep(2)
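The billing loop above is a poll-and-backoff pattern: after a transient Stripe error it backs off and retries the same step, and otherwise adapts the poll interval to whether the last step made progress. Distilled into a generic sketch (hypothetical helper, not part of the examples):

    import time
    from typing import Callable, Tuple, Type

    def run_with_backoff(step: Callable[[], bool],
                         transient_errors: Tuple[Type[Exception], ...],
                         retry_delay: float = 5 * 60) -> None:
        # step() returns True when it processed an entry, False when idle.
        while True:
            try:
                made_progress = step()
            except transient_errors:
                time.sleep(retry_delay)  # back off, then retry the same step
                continue
            time.sleep(10 if made_progress else 2)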
Example #19
    def handle(self, *args: Any, **options: Any) -> None:

        if settings.EMAIL_DELIVERER_DISABLED:
            while True:
                time.sleep(10 ** 9)  # "infinity" seconds

        with lockfile("/tmp/zulip_email_deliver.lockfile"):
            while True:
                email_jobs_to_deliver = ScheduledEmail.objects.filter(scheduled_timestamp__lte=timezone_now())
                if email_jobs_to_deliver:
                    for job in email_jobs_to_deliver:
                        try:
                            send_email(**loads(job.data))
                            job.delete()
                        except EmailNotDeliveredException:
                            logger.warning("%r not delivered" % (job,))
                    time.sleep(10)
                else:
                    # Less load on the db during times of activity, and more responsiveness when the load is low
                    time.sleep(2)
Example #20
    def handle(self, *args: Any, **options: Any) -> None:

        if settings.EMAIL_DELIVERER_DISABLED:
            time.sleep(10**9)  # "infinity seconds"

        with lockfile("/tmp/zulip_email_deliver.lockfile"):
            while True:
                email_jobs_to_deliver = ScheduledEmail.objects.filter(
                    scheduled_timestamp__lte=timezone_now())
                if email_jobs_to_deliver:
                    for job in email_jobs_to_deliver:
                        try:
                            send_email(**loads(job.data))
                            job.delete()
                        except EmailNotDeliveredException:
                            logger.warning("%r not delivered" % (job, ))
                    time.sleep(10)
                else:
                    # Less load on the db during times of activity,
                    # and more responsiveness when the load is low
                    time.sleep(2)