Code example #1
    def do_consume(self, consume_func: Callable[[List[Dict[str, Any]]], None],
                   events: List[Dict[str, Any]]) -> None:
        consume_time_seconds: Optional[float] = None
        try:
            time_start = time.time()
            consume_func(events)
            consume_time_seconds = time.time() - time_start
            self.consumed_since_last_emptied += len(events)
        except Exception:
            self._handle_consume_exception(events)
        finally:
            flush_per_request_caches()
            reset_queries()

            if consume_time_seconds is not None:
                self.recent_consume_times.append((len(events), consume_time_seconds))

            if self.q is not None:
                remaining_queue_size = self.q.queue_size()
            else:
                remaining_queue_size = 0

            if remaining_queue_size == 0:
                self.queue_last_emptied_timestamp = time.time()
                self.consumed_since_last_emptied = 0

            self.consume_iteration_counter += 1
            if self.consume_iteration_counter >= self.CONSUME_ITERATIONS_BEFORE_UPDATE_STATS_NUM:
                self.consume_iteration_counter = 0
                self.update_statistics(remaining_queue_size)
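All of these snippets share one idea: reset_queries here is Django's django.db.reset_queries, which clears the SQL statements Django records on each connection. A worker process that never exits must call it after every batch, or connection.queries grows without bound when DEBUG=True. Below is a minimal sketch of that pattern, assuming a hypothetical fetch_batch/consume pair rather than Zulip's actual queue API:

import time
from typing import Any, Callable, Dict, List

from django.db import reset_queries


def worker_loop(
    fetch_batch: Callable[[], List[Dict[str, Any]]],
    consume: Callable[[List[Dict[str, Any]]], None],
    sleep_delay: float = 1.0,
) -> None:
    while True:
        events = fetch_batch()  # hypothetical queue API
        try:
            consume(events)
        finally:
            # With DEBUG=True Django appends every executed SQL statement
            # to connection.queries; a process that never exits has to
            # clear that log itself or it leaks memory.
            reset_queries()
        if not events:
            time.sleep(sleep_delay)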
Code example #2
File: queue_processors.py Project: zfeed/zulip
 def consume_wrapper(self, data: Dict[str, Any]) -> None:
     try:
         self.consume(data)
     except Exception:
         self._handle_consume_exception([data])
     finally:
         reset_queries()
Code example #3
    def do_consume(self, consume_func: Callable[[List[Dict[str, Any]]], None],
                   events: List[Dict[str, Any]]) -> None:
        consume_time_seconds: Optional[float] = None
        with configure_scope() as scope:
            scope.clear_breadcrumbs()
            add_breadcrumb(
                type='debug',
                category='queue_processor',
                message=f"Consuming {self.queue_name}",
                data={"events": events, "queue_size": self.get_remaining_queue_size()},
            )
        try:
            time_start = time.time()
            consume_func(events)
            consume_time_seconds = time.time() - time_start
            self.consumed_since_last_emptied += len(events)
        except Exception:
            self._handle_consume_exception(events)
        finally:
            flush_per_request_caches()
            reset_queries()

            if consume_time_seconds is not None:
                self.recent_consume_times.append((len(events), consume_time_seconds))

            remaining_queue_size = self.get_remaining_queue_size()
            if remaining_queue_size == 0:
                self.queue_last_emptied_timestamp = time.time()
                self.consumed_since_last_emptied = 0

            self.consume_iteration_counter += 1
            if self.consume_iteration_counter >= self.CONSUME_ITERATIONS_BEFORE_UPDATE_STATS_NUM:
                self.consume_iteration_counter = 0
                self.update_statistics(remaining_queue_size)
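The configure_scope/add_breadcrumb calls in the example above come from sentry-sdk: breadcrumbs left over from the previous batch are cleared so that a crash report describes only the batch being consumed. A minimal standalone sketch of that technique (the queue name and the num_events field are illustrative):

from typing import Any, Dict, List

from sentry_sdk import add_breadcrumb, configure_scope


def annotate_batch(queue_name: str, events: List[Dict[str, Any]]) -> None:
    with configure_scope() as scope:
        # Drop breadcrumbs recorded for the previous batch so the next
        # error report only carries context for this one.
        scope.clear_breadcrumbs()
    add_breadcrumb(
        type="debug",
        category="queue_processor",
        message=f"Consuming {queue_name}",
        data={"num_events": len(events)},
    )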
Code example #4
File: queue_processors.py Project: BakerWang/zulip
 def start(self) -> None:  # nocoverage
     while True:
         # TODO: Probably it'd be better to share code with consume_wrapper()
         events = self.q.drain_queue(self.queue_name, json=True)
         try:
             self.consume_batch(events)
         finally:
             reset_queries()
         time.sleep(self.sleep_delay)
Code example #5
 def do_consume(self, consume_func: Callable[[List[Dict[str, Any]]], None],
                events: List[Dict[str, Any]]) -> None:
     try:
         consume_func(events)
     except Exception:
         self._handle_consume_exception(events)
     finally:
         flush_per_request_caches()
         reset_queries()
Code example #6
    def do_consume(self, consume_func: Callable[[List[Dict[str, Any]]], None],
                   events: List[Dict[str, Any]]) -> None:
        consume_time_seconds: Optional[float] = None
        with configure_scope() as scope:
            scope.clear_breadcrumbs()
            add_breadcrumb(
                type='debug',
                category='queue_processor',
                message=f"Consuming {self.queue_name}",
                data={"events": events, "queue_size": self.get_remaining_queue_size()},
            )
        try:
            if self.idle:
                # We're reactivating after having gone idle due to emptying the queue.
                # We should update the stats file to keep it fresh and to make it clear
                # that the queue started processing, in case the event we're about to process
                # makes us freeze.
                self.idle = False
                self.update_statistics(self.get_remaining_queue_size())

            time_start = time.time()
            if self.MAX_CONSUME_SECONDS and self.ENABLE_TIMEOUTS:
                signal.signal(signal.SIGALRM, functools.partial(timer_expired, self.MAX_CONSUME_SECONDS, len(events)))
                signal.alarm(self.MAX_CONSUME_SECONDS * len(events))
                consume_func(events)
                signal.alarm(0)
            else:
                consume_func(events)
            consume_time_seconds = time.time() - time_start
            self.consumed_since_last_emptied += len(events)
        except Exception as e:
            self._handle_consume_exception(events, e)
        finally:
            flush_per_request_caches()
            reset_queries()

            if consume_time_seconds is not None:
                self.recent_consume_times.append((len(events), consume_time_seconds))

            remaining_queue_size = self.get_remaining_queue_size()
            if remaining_queue_size == 0:
                self.queue_last_emptied_timestamp = time.time()
                self.consumed_since_last_emptied = 0
                # We've cleared all the events from the queue, so we don't
                # need to worry about the small overhead of doing a disk write.
                # We take advantage of this to update the stats file to keep it fresh,
                # especially since the queue might go idle until new events come in.
                self.update_statistics(0)
                self.idle = True
                return

            self.consume_iteration_counter += 1
            if (self.consume_iteration_counter >= self.CONSUME_ITERATIONS_BEFORE_UPDATE_STATS_NUM
                    or time.time() - self.last_statistics_update_time >= self.MAX_SECONDS_BEFORE_UPDATE_STATS):
                self.consume_iteration_counter = 0
                self.update_statistics(remaining_queue_size)
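The example above additionally bounds consume_func with a Unix alarm: SIGALRM fires after MAX_CONSUME_SECONDS * len(events) seconds and interrupts a stuck handler. A minimal, self-contained sketch of that timeout technique (that the handler raises an exception is an assumption about what timer_expired does; signal-based alarms only work in the main thread on Unix):

import signal
from types import FrameType
from typing import Callable, Optional


class TimeoutExpired(Exception):
    pass


def timer_expired(signum: int, frame: Optional[FrameType]) -> None:
    raise TimeoutExpired


def run_with_timeout(func: Callable[[], None], seconds: int) -> None:
    signal.signal(signal.SIGALRM, timer_expired)
    signal.alarm(seconds)  # deliver SIGALRM after `seconds` seconds
    try:
        func()  # TimeoutExpired is raised here if the alarm fires first
    finally:
        signal.alarm(0)  # always cancel a still-pending alarm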
Code example #7
File: middleware.py Project: akashnimare/zulip
def record_request_start_data(log_data: MutableMapping[str, Any]) -> None:
    if settings.PROFILE_ALL_REQUESTS:
        log_data["prof"] = cProfile.Profile()
        log_data["prof"].enable()

    reset_queries()
    log_data['time_started'] = time.time()
    log_data['remote_cache_time_start'] = get_remote_cache_time()
    log_data['remote_cache_requests_start'] = get_remote_cache_requests()
    log_data['bugdown_time_start'] = get_bugdown_time()
    log_data['bugdown_requests_start'] = get_bugdown_requests()
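In the middleware example above, reset_queries() is called at request start for a different reason: it empties the query log so that an end-of-request hook can report only the queries this request issued. A minimal sketch of that idea as a self-contained Django middleware (the response header names are made up):

import time
from typing import Any, Callable

from django.db import connection, reset_queries


class QueryCountMiddleware:
    def __init__(self, get_response: Callable[[Any], Any]) -> None:
        self.get_response = get_response

    def __call__(self, request: Any) -> Any:
        reset_queries()  # start this request with an empty query log
        start = time.time()
        response = self.get_response(request)
        # With DEBUG=True, connection.queries now holds only the
        # statements this request executed.
        response["X-Query-Count"] = str(len(connection.queries))
        response["X-Request-Seconds"] = "%.3f" % (time.time() - start,)
        return response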
Code example #8
    def start(self) -> None:  # nocoverage
        while True:
            # TODO: Probably it'd be better to share code with consume_wrapper()
            events = self.q.drain_queue(self.queue_name, json=True)
            try:
                self.consume_batch(events)
            finally:
                reset_queries()

            # To avoid spinning the CPU, we go to sleep if there's
            # nothing in the queue, or for certain queues with
            # sleep_only_if_empty=False, unconditionally.
            if not self.sleep_only_if_empty or len(events) == 0:
                time.sleep(self.sleep_delay)
Code example #9
File: queue_processors.py Project: 150vb/zulip
 def consume_wrapper(self, data):
     try:
         self.consume(data)
     except Exception:
         self._log_problem()
         if not os.path.exists(settings.QUEUE_ERROR_DIR):
             os.mkdir(settings.QUEUE_ERROR_DIR)
         fname = '%s.errors' % (self.queue_name,)
         fn = os.path.join(settings.QUEUE_ERROR_DIR, fname)
         line = u'%s\t%s\n' % (time.asctime(), ujson.dumps(data))
         lock_fn = fn + '.lock'
         with lockfile(lock_fn):
             with open(fn, 'ab') as f:
                 f.write(line.encode('utf-8'))
     reset_queries()
Code example #10
File: queue_processors.py Project: zag/zulip
 def consume_wrapper(self, data):
     try:
         self.consume(data)
     except Exception:
         self._log_problem()
         if not os.path.exists(settings.QUEUE_ERROR_DIR):
             os.mkdir(settings.QUEUE_ERROR_DIR)
         fname = '%s.errors' % (self.queue_name, )
         fn = os.path.join(settings.QUEUE_ERROR_DIR, fname)
         line = '%s\t%s\n' % (time.asctime(), ujson.dumps(data))
         lock_fn = fn + '.lock'
         with lockfile(lock_fn):
             with open(fn, 'a') as f:
                 f.write(line)
     reset_queries()
Code example #11
File: queue_processors.py Project: zfeed/zulip
    def start(self) -> None:  # nocoverage
        while True:
            events = self.q.drain_queue(self.queue_name, json=True)
            try:
                self.consume_batch(events)
            except Exception:
                self._handle_consume_exception(events)
            finally:
                reset_queries()

            # To avoid spinning the CPU, we go to sleep if there's
            # nothing in the queue, or for certain queues with
            # sleep_only_if_empty=False, unconditionally.
            if not self.sleep_only_if_empty or len(events) == 0:
                time.sleep(self.sleep_delay)
Code example #12
File: queue_processors.py Project: zeenfaizpy/zulip
    def start(self):
        while True:
            missed_events = self.q.drain_queue("missedmessage_emails", json=True)
            by_recipient = defaultdict(list) # type: Dict[int, List[Dict[str, Any]]]

            for event in missed_events:
                logging.info("Received event: %s" % (event,))
                by_recipient[event['user_profile_id']].append(event)

            for user_profile_id, events in by_recipient.items():
                handle_missedmessage_emails(user_profile_id, events)

            reset_queries()
            # Aggregate all messages received every 2 minutes to let someone finish sending a batch
            # of messages
            time.sleep(2 * 60)
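The grouping step above is a generic fan-in pattern: drain everything, bucket events by recipient, then call the handler once per user. A minimal sketch of just that step (the event shape, with a user_profile_id key, is taken from the example):

from collections import defaultdict
from typing import Any, Dict, List


def group_by_recipient(
    events: List[Dict[str, Any]],
) -> Dict[int, List[Dict[str, Any]]]:
    by_recipient: Dict[int, List[Dict[str, Any]]] = defaultdict(list)
    for event in events:
        by_recipient[event["user_profile_id"]].append(event)
    return dict(by_recipient)

Each bucket can then be passed to a per-user handler, exactly as the example does with handle_missedmessage_emails(user_profile_id, events).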
Code example #13
File: queue_processors.py Project: 150vb/zulip
    def process_one_batch(self):
        slow_queries = self.q.drain_queue("slow_queries", json=True)

        if settings.ERROR_BOT is None:
            return

        if len(slow_queries) > 0:
            topic = "%s: slow queries" % (settings.EXTERNAL_HOST,)

            content = ""
            for query in slow_queries:
                content += "    %s\n" % (query,)

            internal_send_message(settings.ERROR_BOT, "stream", "logs", topic, content)

        reset_queries()
Code example #14
File: queue_processors.py Project: xshengshe/zulip
 def consume_wrapper(self, data: Dict[str, Any]) -> None:
     try:
         self.consume(data)
     except Exception:
         self._log_problem()
         if not os.path.exists(settings.QUEUE_ERROR_DIR):
             os.mkdir(settings.QUEUE_ERROR_DIR)  # nocoverage
         fname = '%s.errors' % (self.queue_name, )
         fn = os.path.join(settings.QUEUE_ERROR_DIR, fname)
         line = '%s\t%s\n' % (time.asctime(), ujson.dumps(data))
         lock_fn = fn + '.lock'
         with lockfile(lock_fn):
             with open(fn, 'ab') as f:
                 f.write(line.encode('utf-8'))
         check_and_send_restart_signal()
     finally:
         reset_queries()
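The consume_wrapper variants above spill a failing event to a per-queue .errors file under a lock, so it can be inspected or requeued later. A minimal sketch of that pattern using only the standard library (the directory path and the file_lock helper are hypothetical stand-ins for settings.QUEUE_ERROR_DIR and Zulip's lockfile()):

import fcntl
import json
import os
import time
from contextlib import contextmanager
from typing import Any, Dict, Iterator

QUEUE_ERROR_DIR = "/var/log/myapp/queue_error"  # hypothetical path


@contextmanager
def file_lock(path: str) -> Iterator[None]:
    # Stand-in for Zulip's lockfile(): an exclusive advisory lock that is
    # released when the descriptor is closed.
    fd = os.open(path, os.O_CREAT | os.O_RDWR)
    try:
        fcntl.flock(fd, fcntl.LOCK_EX)
        yield
    finally:
        os.close(fd)


def record_failed_event(queue_name: str, data: Dict[str, Any]) -> None:
    os.makedirs(QUEUE_ERROR_DIR, exist_ok=True)
    fn = os.path.join(QUEUE_ERROR_DIR, f"{queue_name}.errors")
    line = f"{time.asctime()}\t{json.dumps(data)}\n"
    with file_lock(fn + ".lock"):
        with open(fn, "ab") as f:
            f.write(line.encode("utf-8"))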
Code example #15
File: queue_processors.py Project: aakash-cr7/zulip
    def process_one_batch(self):
        # type: () -> None
        slow_queries = self.q.drain_queue("slow_queries", json=True)

        if settings.ERROR_BOT is None:
            return

        if len(slow_queries) > 0:
            topic = "%s: slow queries" % (settings.EXTERNAL_HOST,)

            content = ""
            for query in slow_queries:
                content += "    %s\n" % (query,)

            error_bot_realm = get_user_profile_by_email(settings.ERROR_BOT).realm
            internal_send_message(error_bot_realm, settings.ERROR_BOT,
                                  "stream", "logs", topic, content)

        reset_queries()
Code example #16
    def process_one_batch(self):
        # type: () -> None
        slow_queries = self.q.drain_queue("slow_queries", json=True)

        if settings.ERROR_BOT is None:
            return

        if len(slow_queries) > 0:
            topic = "%s: slow queries" % (settings.EXTERNAL_HOST, )

            content = ""
            for query in slow_queries:
                content += "    %s\n" % (query, )

            error_bot_realm = get_system_bot(settings.ERROR_BOT).realm
            internal_send_message(error_bot_realm, settings.ERROR_BOT,
                                  "stream", "logs", topic, content)

        reset_queries()