Code example #1
class LoopQueueProcessingWorker(QueueProcessingWorker):
    sleep_delay = 1
    batch_size = 100

    def setup(self) -> None:
        self.q = SimpleQueueClient(
            prefetch=max(self.PREFETCH, self.batch_size))

    def start(self) -> None:  # nocoverage
        assert self.q is not None
        self.initialize_statistics()
        self.q.start_json_consumer(
            self.queue_name,
            lambda events: self.do_consume(self.consume_batch, events),
            batch_size=self.batch_size,
            timeout=self.sleep_delay,
        )

    @abstractmethod
    def consume_batch(self, events: List[Dict[str, Any]]) -> None:
        pass

    def consume(self, event: Dict[str, Any]) -> None:
        """In LoopQueueProcessingWorker, consume is used just for automated tests"""
        self.consume_batch([event])
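
For reference, a minimal sketch of how a concrete subclass might fill in the abstract consume_batch hook. The class name, queue name, and body below are illustrative only and do not come from the examples on this page:

from typing import Any, Dict, List

class ExampleBatchWorker(LoopQueueProcessingWorker):
    queue_name = "example_batch"  # hypothetical queue name

    def consume_batch(self, events: List[Dict[str, Any]]) -> None:
        # LoopQueueProcessingWorker buffers up to batch_size events and
        # hands them to this hook in one call.
        for event in events:
            print(event)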
Code example #2
File: purge_queue.py  Project: zeeshanqamar/zulip
 def handle(self, *args, **options):
     # type: (*Any, **str) -> None
     queue_name = options['queue_name']
     queue = SimpleQueueClient()
     queue.ensure_queue(queue_name, lambda: None)
     queue.channel.queue_purge(queue_name)
     print("Done")
Code example #3
    def handle(self, *args: Any, **options: Any) -> None:
        print("Purging queue...")
        queue = SimpleQueueClient()
        queue_name = "noop_batch" if options["batch"] else "noop"
        queue.ensure_queue(queue_name,
                           lambda channel: channel.queue_purge("noop"))
        count = options["count"]
        reps = options["reps"]

        with open(options["csv"], "w", newline="") as csvfile:
            writer = csv.DictWriter(
                csvfile,
                fieldnames=["Queue size", "Queue type", "Prefetch", "Rate"])
            writer.writeheader()

            for prefetch in options["prefetches"]:
                print(f"Queue size {count}, prefetch {prefetch}...")
                worker: Union[NoopWorker, BatchNoopWorker] = NoopWorker(
                    count, options["slow"])
                if options["batch"]:
                    worker = BatchNoopWorker(count, options["slow"])
                    if prefetch > 0 and prefetch < worker.batch_size:
                        print(
                            f"    Skipping, as prefetch {prefetch} is less than batch size {worker.batch_size}"
                        )
                        continue
                worker.ENABLE_TIMEOUTS = True
                worker.setup()

                assert worker.q is not None
                assert worker.q.channel is not None
                worker.q.channel.basic_qos(prefetch_count=prefetch)

                total_time = 0.0
                for i in range(1, reps + 1):
                    worker.consumed = 0
                    timeit(
                        lambda: queue_json_publish(queue_name, {}),
                        number=count,
                    )
                    duration = timeit(
                        lambda: worker.start(),
                        number=1,
                    )
                    print(
                        f"    {i}/{reps}: {count}/{duration}s = {count / duration}/s"
                    )
                    total_time += duration
                    writer.writerow({
                        "Queue size": count,
                        "Queue type": queue_name,
                        "Prefetch": prefetch,
                        "Rate": count / duration,
                    })
                    csvfile.flush()
                print(
                    f"  Overall: {reps * count}/{total_time}s = {(reps * count) / total_time}/s"
                )
Code example #4
File: queue_rate.py  Project: yushao2/zulip
    def handle(self, *args: Any, **options: Any) -> None:
        print("Purging queue...")
        queue = SimpleQueueClient()
        queue_name = "noop_batch" if options["batch"] else "noop"
        queue.ensure_queue(queue_name,
                           lambda channel: channel.queue_purge("noop"))

        count = options["count"]
        reps = options["reps"]

        worker: QueueProcessingWorker = NoopWorker(count, options["slow"])
        if options["batch"]:
            worker = BatchNoopWorker(count, options["slow"])
        worker.ENABLE_TIMEOUTS = True
        worker.setup()
        assert worker.q is not None
        assert worker.q.channel is not None
        worker.q.channel.basic_qos(prefetch_count=options["prefetch"])

        total_enqueue_time = 0.0
        total_dequeue_time = 0.0

        def one_rep() -> None:
            nonlocal total_enqueue_time, total_dequeue_time
            total_enqueue_time += timeit(
                lambda: queue_json_publish(queue_name, {}),
                number=count,
            )
            total_dequeue_time += timeit(
                lambda: worker.start(),
                number=1,
            )

        rate = lambda time, iterations: int(iterations / time)

        total_reps_time = timeit(one_rep, number=reps)
        if reps > 1:
            print(f"Total rate per rep: {rate(total_reps_time, reps)} / sec")

        print(f"Enqueue rate: {rate(total_enqueue_time, count * reps)} / sec")
        print(f"Dequeue rate: {rate(total_dequeue_time, count * reps)} / sec")
Code example #5
File: queue_processors.py  Project: bgupta/zulip
class QueueProcessingWorker(object):
    def __init__(self):
        self.q = SimpleQueueClient()

    def consume_wrapper(self, data):
        try:
            self.consume(data)
        except Exception:
            self._log_problem()
            if not os.path.exists(settings.QUEUE_ERROR_DIR):
                os.mkdir(settings.QUEUE_ERROR_DIR)
            fname = '%s.errors' % (self.queue_name,)
            fn = os.path.join(settings.QUEUE_ERROR_DIR, fname)
            line = '%s\t%s\n' % (time.asctime(), ujson.dumps(data))
            lock_fn = fn + '.lock'
            with lockfile(lock_fn):
                with open(fn, 'a') as f:
                    f.write(line)
        reset_queries()

    def _log_problem(self):
        logging.exception("Problem handling data on queue %s" % (self.queue_name,))

    def start(self):
        self.q.register_json_consumer(self.queue_name, self.consume_wrapper)
        self.q.start_consuming()

    def stop(self):
        self.q.stop_consuming()
Code example #6
File: queue_processors.py  Project: zag/zulip
class QueueProcessingWorker(object):
    def __init__(self):
        self.q = SimpleQueueClient()

    def consume_wrapper(self, data):
        try:
            self.consume(data)
        except Exception:
            self._log_problem()
            if not os.path.exists(settings.QUEUE_ERROR_DIR):
                os.mkdir(settings.QUEUE_ERROR_DIR)
            fname = '%s.errors' % (self.queue_name, )
            fn = os.path.join(settings.QUEUE_ERROR_DIR, fname)
            line = '%s\t%s\n' % (time.asctime(), ujson.dumps(data))
            lock_fn = fn + '.lock'
            with lockfile(lock_fn):
                with open(fn, 'a') as f:
                    f.write(line)
        reset_queries()

    def _log_problem(self):
        logging.exception("Problem handling data on queue %s" %
                          (self.queue_name, ))

    def start(self):
        self.q.register_json_consumer(self.queue_name, self.consume_wrapper)
        self.q.start_consuming()

    def stop(self):
        self.q.stop_consuming()
Code example #7
def need_to_run_configure_rabbitmq(settings_list: List[str]) -> bool:
    obsolete = is_digest_obsolete('last_configure_rabbitmq_hash',
                                  configure_rabbitmq_paths(), settings_list)

    if obsolete:
        return True

    try:
        from zerver.lib.queue import SimpleQueueClient
        SimpleQueueClient()
        return False
    except Exception:
        return True
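
In the provision flow, a check like this typically gates whether configure-rabbitmq needs to run at all, mirroring the rabbitmq_is_configured logic in the provision.py examples later on this page. A hedged sketch of such a call site; run, options, and the settings list are assumed to come from the provision script, as in those examples:

settings_list = []  # placeholder; the caller supplies the RabbitMQ-related settings it tracks
if options.is_force or need_to_run_configure_rabbitmq(settings_list):
    run(["scripts/setup/configure-rabbitmq"])
else:
    print("No need to run `scripts/setup/configure-rabbitmq`.")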
Code example #8
class QueueProcessingWorker(ABC):
    queue_name = None  # type: str

    def __init__(self) -> None:
        self.q = None  # type: SimpleQueueClient
        if self.queue_name is None:
            raise WorkerDeclarationException(
                "Queue worker declared without queue_name")

    @abstractmethod
    def consume(self, data: Dict[str, Any]) -> None:
        pass

    def do_consume(self, consume_func: Callable[[List[Dict[str, Any]]], None],
                   events: List[Dict[str, Any]]) -> None:
        try:
            consume_func(events)
        except Exception:
            self._handle_consume_exception(events)
        finally:
            flush_per_request_caches()
            reset_queries()

    def consume_wrapper(self, data: Dict[str, Any]) -> None:
        consume_func = lambda events: self.consume(events[0])
        self.do_consume(consume_func, [data])

    def _handle_consume_exception(self, events: List[Dict[str, Any]]) -> None:
        self._log_problem()
        if not os.path.exists(settings.QUEUE_ERROR_DIR):
            os.mkdir(settings.QUEUE_ERROR_DIR)  # nocoverage
        fname = '%s.errors' % (self.queue_name, )
        fn = os.path.join(settings.QUEUE_ERROR_DIR, fname)
        line = '%s\t%s\n' % (time.asctime(), ujson.dumps(events))
        lock_fn = fn + '.lock'
        with lockfile(lock_fn):
            with open(fn, 'ab') as f:
                f.write(line.encode('utf-8'))
        check_and_send_restart_signal()

    def _log_problem(self) -> None:
        logging.exception("Problem handling data on queue %s" %
                          (self.queue_name, ))

    def setup(self) -> None:
        self.q = SimpleQueueClient()

    def start(self) -> None:
        self.q.register_json_consumer(self.queue_name, self.consume_wrapper)
        self.q.start_consuming()

    def stop(self) -> None:  # nocoverage
        self.q.stop_consuming()
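
A minimal sketch of defining and running a concrete worker against this base class. ExampleWorker and its queue name are hypothetical; real Zulip workers are registered through project machinery not shown in these snippets:

from typing import Any, Dict

class ExampleWorker(QueueProcessingWorker):
    queue_name = 'example_events'  # hypothetical queue name

    def consume(self, data: Dict[str, Any]) -> None:
        # Handle a single JSON-decoded event pulled off the queue.
        print(data)

worker = ExampleWorker()
worker.setup()  # connects a SimpleQueueClient
worker.start()  # blocks, feeding each event through consume_wrapper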
Code example #9
File: queue_processors.py  Project: vishnudut/zulip
class QueueProcessingWorker:
    queue_name = None  # type: str

    def __init__(self):
        # type: () -> None
        self.q = None  # type: SimpleQueueClient
        if self.queue_name is None:
            raise WorkerDeclarationException(
                "Queue worker declared without queue_name")

    def consume(self, data):
        # type: (Dict[str, Any]) -> None
        raise WorkerDeclarationException("No consumer defined!")

    def consume_wrapper(self, data):
        # type: (Dict[str, Any]) -> None
        try:
            self.consume(data)
        except Exception:
            self._log_problem()
            if not os.path.exists(settings.QUEUE_ERROR_DIR):
                os.mkdir(settings.QUEUE_ERROR_DIR)  # nocoverage
            fname = '%s.errors' % (self.queue_name, )
            fn = os.path.join(settings.QUEUE_ERROR_DIR, fname)
            line = '%s\t%s\n' % (time.asctime(), ujson.dumps(data))
            lock_fn = fn + '.lock'
            with lockfile(lock_fn):
                with open(fn, 'ab') as f:
                    f.write(line.encode('utf-8'))
            check_and_send_restart_signal()
        finally:
            reset_queries()

    def _log_problem(self):
        # type: () -> None
        logging.exception("Problem handling data on queue %s" %
                          (self.queue_name, ))

    def setup(self):
        # type: () -> None
        self.q = SimpleQueueClient()

    def start(self):
        # type: () -> None
        self.q.register_json_consumer(self.queue_name, self.consume_wrapper)
        self.q.start_consuming()

    def stop(self):  # nocoverage
        # type: () -> None
        self.q.stop_consuming()
Code example #10
File: queue_processors.py  Project: yhl-python/zulip
class QueueProcessingWorker(object):
    queue_name = None  # type: str

    def __init__(self):
        # type: () -> None
        self.q = None  # type: SimpleQueueClient
        if self.queue_name is None:
            raise WorkerDeclarationException("Queue worker declared without queue_name")

    def consume(self, data):
        # type: (Mapping[str, Any]) -> None
        raise WorkerDeclarationException("No consumer defined!")

    def consume_wrapper(self, data):
        # type: (Mapping[str, Any]) -> None
        try:
            self.consume(data)
        except Exception:
            self._log_problem()
            if not os.path.exists(settings.QUEUE_ERROR_DIR):
                os.mkdir(settings.QUEUE_ERROR_DIR)
            fname = '%s.errors' % (self.queue_name,)
            fn = os.path.join(settings.QUEUE_ERROR_DIR, fname)
            line = u'%s\t%s\n' % (time.asctime(), ujson.dumps(data))
            lock_fn = fn + '.lock'
            with lockfile(lock_fn):
                with open(fn, 'ab') as f:
                    f.write(line.encode('utf-8'))
            check_and_send_restart_signal()
        finally:
            reset_queries()

    def _log_problem(self):
        # type: () -> None
        logging.exception("Problem handling data on queue %s" % (self.queue_name,))

    def setup(self):
        # type: () -> None
        self.q = SimpleQueueClient()

    def start(self):
        # type: () -> None
        self.q.register_json_consumer(self.queue_name, self.consume_wrapper)
        self.q.start_consuming()

    def stop(self):
        # type: () -> None
        self.q.stop_consuming()
Code example #11
File: queue_processors.py  Project: SummerBulb/zulip
class QueueProcessingWorker(object):
    queue_name = None

    def __init__(self):
        self.q = SimpleQueueClient()
        if self.queue_name is None:
            raise WorkerDeclarationException("Queue worker declared without queue_name")

    def consume(self, data):
        raise WorkerDeclarationException("No consumer defined!")

    def consume_wrapper(self, data):
        try:
            self.consume(data)
        except Exception:
            self._log_problem()
            if not os.path.exists(settings.QUEUE_ERROR_DIR):
                os.mkdir(settings.QUEUE_ERROR_DIR)
            fname = "%s.errors" % (self.queue_name,)
            fn = os.path.join(settings.QUEUE_ERROR_DIR, fname)
            line = "%s\t%s\n" % (time.asctime(), ujson.dumps(data))
            lock_fn = fn + ".lock"
            with lockfile(lock_fn):
                with open(fn, "a") as f:
                    f.write(line.encode("utf-8"))
        reset_queries()

    def _log_problem(self):
        logging.exception("Problem handling data on queue %s" % (self.queue_name,))

    def start(self):
        self.q.register_json_consumer(self.queue_name, self.consume_wrapper)
        self.q.start_consuming()

    def stop(self):
        self.q.stop_consuming()
Code example #12
 def purge_queue(queue_name: str) -> None:
     queue = SimpleQueueClient()
     queue.ensure_queue(queue_name,
                        lambda channel: channel.queue_purge(queue_name))
Code example #13
def main(options: argparse.Namespace) -> int:
    setup_shell_profile('~/.bash_profile')
    setup_shell_profile('~/.zprofile')

    # This needs to happen before anything that imports zproject.settings.
    run(["scripts/setup/generate_secrets.py", "--development"])

    # create log directory `zulip/var/log`
    os.makedirs(LOG_DIR_PATH, exist_ok=True)
    # create upload directory `var/uploads`
    os.makedirs(UPLOAD_DIR_PATH, exist_ok=True)
    # create test upload directory `var/test_upload`
    os.makedirs(TEST_UPLOAD_DIR_PATH, exist_ok=True)
    # create coverage directory `var/coverage`
    os.makedirs(COVERAGE_DIR_PATH, exist_ok=True)
    # create linecoverage directory `var/node-coverage`
    os.makedirs(NODE_TEST_COVERAGE_DIR_PATH, exist_ok=True)
    # create XUnit XML test results directory `var/xunit-test-results`
    os.makedirs(XUNIT_XML_TEST_RESULTS_DIR_PATH, exist_ok=True)

    # The `build_emoji` script requires `emoji-datasource` package
    # which we install via npm; thus this step is after installing npm
    # packages.
    if not os.path.isdir(EMOJI_CACHE_PATH):
        run_as_root(["mkdir", EMOJI_CACHE_PATH])
    run_as_root(["chown", "%s:%s" % (user_id, user_id), EMOJI_CACHE_PATH])
    run(["tools/setup/emoji/build_emoji"])

    # copy over static files from the zulip_bots package
    generate_zulip_bots_static_files()

    build_pygments_data_paths = [
        "tools/setup/build_pygments_data", "tools/setup/lang.json"
    ]
    from pygments import __version__ as pygments_version
    if file_or_package_hash_updated(build_pygments_data_paths,
                                    "build_pygments_data_hash",
                                    options.is_force, [pygments_version]):
        run(["tools/setup/build_pygments_data"])
    else:
        print("No need to run `tools/setup/build_pygments_data`.")

    update_authors_json_paths = [
        "tools/update-authors-json", "zerver/tests/fixtures/authors.json"
    ]
    if file_or_package_hash_updated(update_authors_json_paths,
                                    "update_authors_json_hash",
                                    options.is_force):
        run(["tools/update-authors-json", "--use-fixture"])
    else:
        print("No need to run `tools/update-authors-json`.")

    email_source_paths = [
        "tools/inline-email-css", "templates/zerver/emails/email.css"
    ]
    email_source_paths += glob.glob('templates/zerver/emails/*.source.html')
    if file_or_package_hash_updated(email_source_paths,
                                    "last_email_source_files_hash",
                                    options.is_force):
        run(["tools/inline-email-css"])
    else:
        print("No need to run `tools/inline-email-css`.")

    if not options.is_production_travis:
        # The following block is skipped for the production Travis
        # suite, because that suite doesn't make use of these elements
        # of the development environment (it just uses the development
        # environment to build a release tarball).

        # Need to set up Django before using template_database_status
        os.environ.setdefault("DJANGO_SETTINGS_MODULE", "zproject.settings")
        import django
        django.setup()

        from zerver.lib.test_fixtures import template_database_status, run_db_migrations, \
            destroy_leaked_test_databases

        try:
            from zerver.lib.queue import SimpleQueueClient
            SimpleQueueClient()
            rabbitmq_is_configured = True
        except Exception:
            rabbitmq_is_configured = False

        if options.is_force or not rabbitmq_is_configured:
            run(["scripts/setup/configure-rabbitmq"])
        else:
            print("RabbitMQ is already configured.")

        migration_status_path = os.path.join(UUID_VAR_PATH,
                                             "migration_status_dev")
        dev_template_db_status = template_database_status(
            migration_status=migration_status_path,
            settings="zproject.settings",
            database_name="zulip",
        )
        if options.is_force or dev_template_db_status == 'needs_rebuild':
            run(["tools/setup/postgres-init-dev-db"])
            run(["tools/do-destroy-rebuild-database"])
        elif dev_template_db_status == 'run_migrations':
            run_db_migrations('dev')
        elif dev_template_db_status == 'current':
            print("No need to regenerate the dev DB.")

        test_template_db_status = template_database_status()
        if options.is_force or test_template_db_status == 'needs_rebuild':
            run(["tools/setup/postgres-init-test-db"])
            run(["tools/do-destroy-rebuild-test-database"])
        elif test_template_db_status == 'run_migrations':
            run_db_migrations('test')
        elif test_template_db_status == 'current':
            print("No need to regenerate the test DB.")

        # Consider updating generated translations data: both `.mo`
        # files and `language-options.json`.
        paths = ['zerver/management/commands/compilemessages.py']
        paths += glob.glob('locale/*/LC_MESSAGES/*.po')
        paths += glob.glob('locale/*/translations.json')

        if file_or_package_hash_updated(paths, "last_compilemessages_hash",
                                        options.is_force):
            run(["./manage.py", "compilemessages"])
        else:
            print("No need to run `manage.py compilemessages`.")

        destroyed = destroy_leaked_test_databases()
        if destroyed:
            print("Dropped %s stale test databases!" % (destroyed, ))

    run(["scripts/lib/clean-unused-caches"])

    # Keeping this cache file around can cause eslint to throw
    # random TypeErrors when new/updated dependencies are added
    if os.path.isfile('.eslintcache'):
        # Remove this block when
        # https://github.com/eslint/eslint/issues/11639 is fixed
        # upstream.
        os.remove('.eslintcache')

    # Clean up the root of the `var/` directory for various
    # testing-related files that we have migrated to
    # `var/<uuid>/test-backend`.
    print("Cleaning var/ directory files...")
    var_paths = glob.glob('var/test*')
    var_paths.append('var/bot_avatar')
    for path in var_paths:
        try:
            if os.path.isdir(path):
                shutil.rmtree(path)
            else:
                os.remove(path)
        except FileNotFoundError:
            pass

    version_file = os.path.join(UUID_VAR_PATH, 'provision_version')
    print('writing to %s\n' % (version_file, ))
    open(version_file, 'w').write(PROVISION_VERSION + '\n')

    print()
    print(OKBLUE + "Zulip development environment setup succeeded!" + ENDC)
    return 0
Code example #14
File: queue_processors.py  Project: pastewka/zulip
class QueueProcessingWorker(ABC):
    queue_name: str
    MAX_CONSUME_SECONDS: Optional[int] = 30
    ENABLE_TIMEOUTS = False
    CONSUME_ITERATIONS_BEFORE_UPDATE_STATS_NUM = 50
    MAX_SECONDS_BEFORE_UPDATE_STATS = 30

    def __init__(self) -> None:
        self.q: Optional[SimpleQueueClient] = None
        if not hasattr(self, "queue_name"):
            raise WorkerDeclarationException(
                "Queue worker declared without queue_name")

        self.initialize_statistics()

    def initialize_statistics(self) -> None:
        self.queue_last_emptied_timestamp = time.time()
        self.consumed_since_last_emptied = 0
        self.recent_consume_times: MutableSequence[Tuple[int, float]] = deque(
            maxlen=50)
        self.consume_iteration_counter = 0
        self.idle = True
        self.last_statistics_update_time = 0.0

        self.update_statistics(0)

    def update_statistics(self, remaining_local_queue_size: int) -> None:
        total_seconds = sum(seconds
                            for _, seconds in self.recent_consume_times)
        total_events = sum(events_number
                           for events_number, _ in self.recent_consume_times)
        if total_events == 0:
            recent_average_consume_time = None
        else:
            recent_average_consume_time = total_seconds / total_events
        stats_dict = dict(
            update_time=time.time(),
            recent_average_consume_time=recent_average_consume_time,
            current_queue_size=remaining_local_queue_size,
            queue_last_emptied_timestamp=self.queue_last_emptied_timestamp,
            consumed_since_last_emptied=self.consumed_since_last_emptied,
        )

        os.makedirs(settings.QUEUE_STATS_DIR, exist_ok=True)

        fname = f"{self.queue_name}.stats"
        fn = os.path.join(settings.QUEUE_STATS_DIR, fname)
        with lockfile(fn + ".lock"):
            tmp_fn = fn + ".tmp"
            with open(tmp_fn, "wb") as f:
                f.write(
                    orjson.dumps(stats_dict,
                                 option=orjson.OPT_APPEND_NEWLINE
                                 | orjson.OPT_INDENT_2))
            os.rename(tmp_fn, fn)
        self.last_statistics_update_time = time.time()

    def get_remaining_local_queue_size(self) -> int:
        if self.q is not None:
            return self.q.local_queue_size()
        else:
            # This is a special case that will happen if we're operating without
            # using RabbitMQ (e.g. in tests). In that case there's no queuing to speak of
            # and the only reasonable size to return is 0.
            return 0

    @abstractmethod
    def consume(self, data: Dict[str, Any]) -> None:
        pass

    def do_consume(self, consume_func: Callable[[List[Dict[str, Any]]], None],
                   events: List[Dict[str, Any]]) -> None:
        consume_time_seconds: Optional[float] = None
        with configure_scope() as scope:
            scope.clear_breadcrumbs()
            add_breadcrumb(
                type="debug",
                category="queue_processor",
                message=f"Consuming {self.queue_name}",
                data={
                    "events": events,
                    "local_queue_size": self.get_remaining_local_queue_size()
                },
            )
        try:
            if self.idle:
                # We're reactivating after having gone idle due to emptying the queue.
                # We should update the stats file to keep it fresh and to make it clear
                # that the queue started processing, in case the event we're about to process
                # makes us freeze.
                self.idle = False
                self.update_statistics(self.get_remaining_local_queue_size())

            time_start = time.time()
            if self.MAX_CONSUME_SECONDS and self.ENABLE_TIMEOUTS:
                try:
                    signal.signal(
                        signal.SIGALRM,
                        functools.partial(self.timer_expired,
                                          self.MAX_CONSUME_SECONDS, events),
                    )
                    try:
                        signal.alarm(self.MAX_CONSUME_SECONDS * len(events))
                        consume_func(events)
                    finally:
                        signal.alarm(0)
                finally:
                    signal.signal(signal.SIGALRM, signal.SIG_DFL)
            else:
                consume_func(events)
            consume_time_seconds = time.time() - time_start
            self.consumed_since_last_emptied += len(events)
        except Exception as e:
            self._handle_consume_exception(events, e)
        finally:
            flush_per_request_caches()
            reset_queries()

            if consume_time_seconds is not None:
                self.recent_consume_times.append(
                    (len(events), consume_time_seconds))

            remaining_local_queue_size = self.get_remaining_local_queue_size()
            if remaining_local_queue_size == 0:
                self.queue_last_emptied_timestamp = time.time()
                self.consumed_since_last_emptied = 0
                # We've cleared all the events from the queue, so we don't
                # need to worry about the small overhead of doing a disk write.
                # We take advantage of this to update the stats file to keep it fresh,
                # especially since the queue might go idle until new events come in.
                self.update_statistics(0)
                self.idle = True
                return

            self.consume_iteration_counter += 1
            if (self.consume_iteration_counter >=
                    self.CONSUME_ITERATIONS_BEFORE_UPDATE_STATS_NUM
                    or time.time() - self.last_statistics_update_time >=
                    self.MAX_SECONDS_BEFORE_UPDATE_STATS):
                self.consume_iteration_counter = 0
                self.update_statistics(remaining_local_queue_size)

    def consume_single_event(self, event: Dict[str, Any]) -> None:
        consume_func = lambda events: self.consume(events[0])
        self.do_consume(consume_func, [event])

    def timer_expired(self, limit: int, events: List[Dict[str, Any]],
                      signal: int, frame: FrameType) -> None:
        raise WorkerTimeoutException(self.queue_name, limit, len(events))

    def _handle_consume_exception(self, events: List[Dict[str, Any]],
                                  exception: Exception) -> None:
        if isinstance(exception, InterruptConsumeException):
            # The exception signals that no further error handling
            # is needed and the worker can proceed.
            return

        with configure_scope() as scope:
            scope.set_context(
                "events",
                {
                    "data": events,
                    "queue_name": self.queue_name,
                },
            )
            if isinstance(exception, WorkerTimeoutException):
                with sentry_sdk.push_scope() as scope:
                    scope.fingerprint = ["worker-timeout", self.queue_name]
                    logging.exception(exception, stack_info=True)
            else:
                logging.exception("Problem handling data on queue %s",
                                  self.queue_name,
                                  stack_info=True)
        if not os.path.exists(settings.QUEUE_ERROR_DIR):
            os.mkdir(settings.QUEUE_ERROR_DIR)  # nocoverage
        # Use 'mark_sanitized' to prevent Pysa from detecting this false positive
        # flow. 'queue_name' is always a constant string.
        fname = mark_sanitized(f"{self.queue_name}.errors")
        fn = os.path.join(settings.QUEUE_ERROR_DIR, fname)
        line = f"{time.asctime()}\t{orjson.dumps(events).decode()}\n"
        lock_fn = fn + ".lock"
        with lockfile(lock_fn):
            with open(fn, "a") as f:
                f.write(line)
        check_and_send_restart_signal()

    def setup(self) -> None:
        self.q = SimpleQueueClient()

    def start(self) -> None:
        assert self.q is not None
        self.initialize_statistics()
        self.q.start_json_consumer(
            self.queue_name,
            lambda events: self.consume_single_event(events[0]),
        )

    def stop(self) -> None:  # nocoverage
        assert self.q is not None
        self.q.stop_consuming()
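
The MAX_CONSUME_SECONDS handling in do_consume above follows the standard Unix SIGALRM pattern: install a handler, arm the alarm for the allowed number of seconds, run the work, and always disarm and restore the previous handler. A standalone sketch of just that pattern (slow_task and the 2-second limit are illustrative; SIGALRM is only available on Unix-like systems):

import signal
import time
from types import FrameType
from typing import Optional

class TimeoutExpired(Exception):
    pass

def on_alarm(signum: int, frame: Optional[FrameType]) -> None:
    raise TimeoutExpired()

def slow_task() -> None:
    time.sleep(5)  # stand-in for consume_func(events)

signal.signal(signal.SIGALRM, on_alarm)
try:
    signal.alarm(2)  # allow at most 2 seconds, like MAX_CONSUME_SECONDS * len(events)
    try:
        slow_task()
    except TimeoutExpired:
        print("worker timed out")
    finally:
        signal.alarm(0)  # always disarm the alarm
finally:
    signal.signal(signal.SIGALRM, signal.SIG_DFL)  # restore the default handler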
Code example #15
File: queue_processors.py  Project: zeenfaizpy/zulip
 def setup(self):
     self.q = SimpleQueueClient()
Code example #16
File: purge_queue.py  Project: waveyuk/zulip
 def purge_queue(queue_name):
     # type: (str) -> None
     queue = SimpleQueueClient()
     queue.ensure_queue(queue_name, lambda: None)
     queue.channel.queue_purge(queue_name)
Code example #17
File: queue_processors.py  Project: 150vb/zulip
 def setup(self):
     self.q = SimpleQueueClient()
Code example #18
File: queue_processors.py  Project: zag/zulip
 def __init__(self):
     self.q = SimpleQueueClient()
Code example #19
File: queue_processors.py  Project: xshengshe/zulip
 def setup(self) -> None:
     self.q = SimpleQueueClient()
Code example #20
File: provision.py  Project: qu3stbaby/zulip
def main(options):
    # type: (Any) -> int

    # yarn and management commands expect to be run from the root of the
    # project.
    os.chdir(ZULIP_PATH)

    # setup-apt-repo does an `apt-get update`
    # hash the apt dependencies
    sha_sum = hashlib.sha1()

    for apt_depedency in APT_DEPENDENCIES[codename]:
        sha_sum.update(apt_depedency.encode('utf8'))
    # hash the content of setup-apt-repo
    sha_sum.update(open('scripts/lib/setup-apt-repo', 'rb').read())

    new_apt_dependencies_hash = sha_sum.hexdigest()
    last_apt_dependencies_hash = None
    apt_hash_file_path = os.path.join(UUID_VAR_PATH, "apt_dependencies_hash")
    try:
        with open(apt_hash_file_path, 'r') as hash_file:
            last_apt_dependencies_hash = hash_file.read()
    except IOError:
        run(['touch', apt_hash_file_path])

    if (new_apt_dependencies_hash != last_apt_dependencies_hash):
        try:
            install_apt_deps()
        except subprocess.CalledProcessError:
            # Might be a failure due to network connection issues. Retrying...
            print(
                WARNING +
                "`apt-get -y install` failed while installing dependencies; retrying..."
                + ENDC)
            # Since a common failure mode is for the caching in
            # `setup-apt-repo` to optimize the fast code path to skip
            # running `apt-get update` when the target apt repository
            # is out of date, we run it explicitly here so that we
            # recover automatically.
            run(['sudo', 'apt-get', 'update'])
            install_apt_deps()
        with open(apt_hash_file_path, 'w') as hash_file:
            hash_file.write(new_apt_dependencies_hash)
    else:
        print("No changes to apt dependencies, so skipping apt operations.")

    # Here we install node.
    run(["sudo", "-H", "scripts/lib/install-node"])

    # This is a wrapper around `yarn`, which we run last since
    # it can often fail due to network issues beyond our control.
    try:
        # Hack: We remove `node_modules` as root to work around an
        # issue with the symlinks being improperly owned by root.
        if os.path.islink("node_modules"):
            run(["sudo", "rm", "-f", "node_modules"])
        run(["sudo", "mkdir", "-p", NODE_MODULES_CACHE_PATH])
        run([
            "sudo", "chown",
            "%s:%s" % (user_id, user_id), NODE_MODULES_CACHE_PATH
        ])
        setup_node_modules(prefer_offline=True)
    except subprocess.CalledProcessError:
        print(WARNING + "`yarn install` failed; retrying..." + ENDC)
        setup_node_modules()

    # Import tools/setup_venv.py instead of running it so that we get an
    # activated virtualenv for the rest of the provisioning process.
    from tools.setup import setup_venvs
    setup_venvs.main()

    setup_shell_profile('~/.bash_profile')
    setup_shell_profile('~/.zprofile')

    run(["sudo", "cp", REPO_STOPWORDS_PATH, TSEARCH_STOPWORDS_PATH])

    # create log directory `zulip/var/log`
    run(["mkdir", "-p", LOG_DIR_PATH])
    # create upload directory `var/uploads`
    run(["mkdir", "-p", UPLOAD_DIR_PATH])
    # create test upload directory `var/test_upload`
    run(["mkdir", "-p", TEST_UPLOAD_DIR_PATH])
    # create coverage directory `var/coverage`
    run(["mkdir", "-p", COVERAGE_DIR_PATH])
    # create linecoverage directory `var/node-coverage`
    run(["mkdir", "-p", NODE_TEST_COVERAGE_DIR_PATH])

    # `build_emoji` script requires `emoji-datasource` package which we install
    # via npm and hence it should be executed after we are done installing npm
    # packages.
    if not os.path.isdir(EMOJI_CACHE_PATH):
        run(["sudo", "mkdir", EMOJI_CACHE_PATH])
    run(["sudo", "chown", "%s:%s" % (user_id, user_id), EMOJI_CACHE_PATH])
    run(["tools/setup/emoji/build_emoji"])

    # copy over static files from the zulip_bots package
    run(["tools/setup/generate_zulip_bots_static_files"])

    run(["tools/generate-custom-icon-webfont"])
    run(["tools/setup/build_pygments_data"])
    run(["scripts/setup/generate_secrets.py", "--development"])
    run(["tools/update-authors-json", "--use-fixture"])
    email_source_paths = [
        "tools/inline-email-css", "templates/zerver/emails/email.css"
    ]
    email_source_paths += glob.glob('templates/zerver/emails/*.source.html')
    if file_hash_updated(email_source_paths, "last_email_source_files_hash",
                         options.is_force):
        run(["tools/inline-email-css"])
    else:
        print("No need to run `tools/inline-email-css`.")
    if is_circleci or (is_travis and not options.is_production_travis):
        run(["sudo", "service", "rabbitmq-server", "restart"])
        run(["sudo", "service", "redis-server", "restart"])
        run(["sudo", "service", "memcached", "restart"])
        run(["sudo", "service", "postgresql", "restart"])
    elif options.is_docker:
        run(["sudo", "service", "rabbitmq-server", "restart"])
        run(["sudo", "pg_dropcluster", "--stop", POSTGRES_VERSION, "main"])
        run([
            "sudo", "pg_createcluster", "-e", "utf8", "--start",
            POSTGRES_VERSION, "main"
        ])
        run(["sudo", "service", "redis-server", "restart"])
        run(["sudo", "service", "memcached", "restart"])
    if not options.is_production_travis:
        # The following block is skipped for the production Travis
        # suite, because that suite doesn't make use of these elements
        # of the development environment (it just uses the development
        # environment to build a release tarball).

        # Need to set up Django before using template_database_status
        os.environ.setdefault("DJANGO_SETTINGS_MODULE", "zproject.settings")
        import django
        django.setup()

        from zerver.lib.test_fixtures import template_database_status, run_db_migrations

        try:
            from zerver.lib.queue import SimpleQueueClient
            SimpleQueueClient()
            rabbitmq_is_configured = True
        except Exception:
            rabbitmq_is_configured = False

        if options.is_force or not rabbitmq_is_configured:
            run(["scripts/setup/configure-rabbitmq"])
        else:
            print("RabbitMQ is already configured.")

        migration_status_path = os.path.join(UUID_VAR_PATH,
                                             "migration_status_dev")
        dev_template_db_status = template_database_status(
            migration_status=migration_status_path,
            settings="zproject.settings",
            database_name="zulip",
        )
        if options.is_force or dev_template_db_status == 'needs_rebuild':
            run(["tools/setup/postgres-init-dev-db"])
            run(["tools/do-destroy-rebuild-database"])
        elif dev_template_db_status == 'run_migrations':
            run_db_migrations('dev')
        elif dev_template_db_status == 'current':
            print("No need to regenerate the dev DB.")

        test_template_db_status = template_database_status()
        if options.is_force or test_template_db_status == 'needs_rebuild':
            run(["tools/setup/postgres-init-test-db"])
            run(["tools/do-destroy-rebuild-test-database"])
        elif test_template_db_status == 'run_migrations':
            run_db_migrations('test')
        elif test_template_db_status == 'current':
            print("No need to regenerate the test DB.")

        # Consider updating generated translations data: both `.mo`
        # files and `language-options.json`.
        paths = ['zerver/management/commands/compilemessages.py']
        paths += glob.glob('static/locale/*/LC_MESSAGES/*.po')
        paths += glob.glob('static/locale/*/translations.json')

        if file_hash_updated(paths, "last_compilemessages_hash",
                             options.is_force):
            run(["./manage.py", "compilemessages"])
        else:
            print("No need to run `manage.py compilemessages`.")

    run(["scripts/lib/clean-unused-caches"])

    version_file = os.path.join(UUID_VAR_PATH, 'provision_version')
    print('writing to %s\n' % (version_file, ))
    open(version_file, 'w').write(PROVISION_VERSION + '\n')

    print()
    print(OKBLUE + "Zulip development environment setup succeeded!" + ENDC)
    return 0
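
The apt-dependency block in this example uses a simple change-detection pattern: hash the dependency list (and the relevant setup script), compare with the hash saved by the previous run, and only redo the expensive install when they differ. A minimal standalone sketch of that pattern; the dependency list and hash file path are placeholders:

import hashlib
import os

def dependency_digest(dependencies: list) -> str:
    sha = hashlib.sha1()
    for dep in dependencies:
        sha.update(dep.encode('utf8'))
    return sha.hexdigest()

def needs_install(hash_path: str, new_digest: str) -> bool:
    old_digest = None
    if os.path.exists(hash_path):
        with open(hash_path) as f:
            old_digest = f.read()
    return new_digest != old_digest

new_digest = dependency_digest(["postgresql", "rabbitmq-server", "redis-server"])  # illustrative list
hash_path = "/tmp/apt_dependencies_hash"  # illustrative location
if needs_install(hash_path, new_digest):
    print("dependencies changed; would run install_apt_deps() here")
    with open(hash_path, 'w') as f:
        f.write(new_digest)
else:
    print("No changes to apt dependencies, so skipping apt operations.")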
Code example #21
File: queue_processors.py  Project: BakerWang/zulip
 def setup(self) -> None:
     self.q = SimpleQueueClient()
Code example #22
File: queue_processors.py  Project: bgupta/zulip
 def __init__(self):
     self.q = SimpleQueueClient()
Code example #23
File: queue_processors.py  Project: Croolis/zulip
 def __init__(self):
     self.q = SimpleQueueClient()
     if self.queue_name is None:
         raise WorkerDeclarationException("Queue worker declared without queue_name")
Code example #24
 def setup(self) -> None:
     self.q = SimpleQueueClient(
         prefetch=max(self.PREFETCH, self.batch_size))
Code example #25
 def __init__(self):
     self.q = SimpleQueueClient()
     if self.queue_name is None:
         raise WorkerDeclarationException("Queue worker declared without queue_name")
Code example #26
def main(options: argparse.Namespace) -> int:
    setup_bash_profile()
    setup_shell_profile('~/.zprofile')

    # This needs to happen before anything that imports zproject.settings.
    run(["scripts/setup/generate_secrets.py", "--development"])

    create_var_directories()

    # The `build_emoji` script requires `emoji-datasource` package
    # which we install via npm; thus this step is after installing npm
    # packages.
    run(["tools/setup/emoji/build_emoji"])

    # copy over static files from the zulip_bots package
    generate_zulip_bots_static_files()

    if options.is_force or need_to_run_build_pygments_data():
        run(["tools/setup/build_pygments_data"])
        write_new_digest(
            'build_pygments_data_hash',
            build_pygments_data_paths(),
            [pygments_version]
        )
    else:
        print("No need to run `tools/setup/build_pygments_data`.")

    if options.is_force or need_to_run_inline_email_css():
        run(["scripts/setup/inline_email_css.py"])
        write_new_digest(
            "last_email_source_files_hash",
            inline_email_css_paths(),
        )
    else:
        print("No need to run `scripts/setup/inline_email_css.py`.")

    if not options.is_production_test_suite:
        # The following block is skipped for the production test
        # suite, because that suite doesn't make use of these elements
        # of the development environment (it just uses the development
        # environment to build a release tarball).

        # Need to set up Django before using template_status
        os.environ.setdefault("DJANGO_SETTINGS_MODULE", "zproject.settings")
        import django
        django.setup()

        from zerver.lib.test_fixtures import (
            DEV_DATABASE,
            TEST_DATABASE,
            destroy_leaked_test_databases,
        )

        try:
            from zerver.lib.queue import SimpleQueueClient
            SimpleQueueClient()
            rabbitmq_is_configured = True
        except Exception:
            rabbitmq_is_configured = False

        if options.is_force or not rabbitmq_is_configured:
            run(["scripts/setup/configure-rabbitmq"])
        else:
            print("No need to run `scripts/setup/configure-rabbitmq.")

        dev_template_db_status = DEV_DATABASE.template_status()
        if options.is_force or dev_template_db_status == 'needs_rebuild':
            run(["tools/setup/postgres-init-dev-db"])
            run(["tools/rebuild-dev-database"])
        elif dev_template_db_status == 'run_migrations':
            DEV_DATABASE.run_db_migrations()
        elif dev_template_db_status == 'current':
            print("No need to regenerate the dev DB.")

        test_template_db_status = TEST_DATABASE.template_status()
        if options.is_force or test_template_db_status == 'needs_rebuild':
            run(["tools/setup/postgres-init-test-db"])
            run(["tools/rebuild-test-database"])
        elif test_template_db_status == 'run_migrations':
            TEST_DATABASE.run_db_migrations()
        elif test_template_db_status == 'current':
            print("No need to regenerate the test DB.")

        if options.is_force or need_to_run_compilemessages():
            run(["./manage.py", "compilemessages"])
            write_new_digest(
                "last_compilemessages_hash",
                compilemessages_paths(),
            )
        else:
            print("No need to run `manage.py compilemessages`.")

        destroyed = destroy_leaked_test_databases()
        if destroyed:
            print("Dropped %s stale test databases!" % (destroyed,))

    run(["scripts/lib/clean-unused-caches", "--threshold=6"])

    # Keeping this cache file around can cause eslint to throw
    # random TypeErrors when new/updated dependencies are added
    if os.path.isfile('.eslintcache'):
        # Remove this block when
        # https://github.com/eslint/eslint/issues/11639 is fixed
        # upstream.
        os.remove('.eslintcache')

    # Clean up the root of the `var/` directory for various
    # testing-related files that we have migrated to
    # `var/<uuid>/test-backend`.
    print("Cleaning var/ directory files...")
    var_paths = glob.glob('var/test*')
    var_paths.append('var/bot_avatar')
    for path in var_paths:
        try:
            if os.path.isdir(path):
                shutil.rmtree(path)
            else:
                os.remove(path)
        except FileNotFoundError:
            pass

    version_file = os.path.join(UUID_VAR_PATH, 'provision_version')
    print('writing to %s\n' % (version_file,))
    open(version_file, 'w').write(PROVISION_VERSION + '\n')

    print()
    print(OKBLUE + "Zulip development environment setup succeeded!" + ENDC)
    return 0
Code example #27
File: queue_processors.py  Project: aakash-cr7/zulip
 def setup(self):
     # type: () -> None
     self.q = SimpleQueueClient()
Code example #28
File: purge_queue.py  Project: brockwhittaker/zulip
 def purge_queue(queue_name: str) -> None:
     queue = SimpleQueueClient()
     queue.ensure_queue(queue_name, lambda: None)
     queue.channel.queue_purge(queue_name)
Code example #29
class QueueProcessingWorker(ABC):
    queue_name: str = None
    CONSUME_ITERATIONS_BEFORE_UPDATE_STATS_NUM = 50

    def __init__(self) -> None:
        self.q: SimpleQueueClient = None
        if self.queue_name is None:
            raise WorkerDeclarationException(
                "Queue worker declared without queue_name")

        self.initialize_statistics()

    def initialize_statistics(self) -> None:
        self.queue_last_emptied_timestamp = time.time()
        self.consumed_since_last_emptied = 0
        self.recent_consume_times: MutableSequence[Tuple[int, float]] = deque(
            maxlen=50)
        self.consume_interation_counter = 0

        self.update_statistics(0)

    def update_statistics(self, remaining_queue_size: int) -> None:
        total_seconds = sum(
            [seconds for _, seconds in self.recent_consume_times])
        total_events = sum(
            [events_number for events_number, _ in self.recent_consume_times])
        if total_events == 0:
            recent_average_consume_time = None
        else:
            recent_average_consume_time = total_seconds / total_events
        stats_dict = dict(
            update_time=time.time(),
            recent_average_consume_time=recent_average_consume_time,
            current_queue_size=remaining_queue_size,
            queue_last_emptied_timestamp=self.queue_last_emptied_timestamp,
            consumed_since_last_emptied=self.consumed_since_last_emptied,
        )

        os.makedirs(settings.QUEUE_STATS_DIR, exist_ok=True)

        fname = '%s.stats' % (self.queue_name, )
        fn = os.path.join(settings.QUEUE_STATS_DIR, fname)
        with lockfile(fn + '.lock'):
            tmp_fn = fn + '.tmp'
            with open(tmp_fn, 'w') as f:
                serialized_dict = ujson.dumps(stats_dict, indent=2)
                serialized_dict += '\n'
                f.write(serialized_dict)
            os.rename(tmp_fn, fn)

    @abstractmethod
    def consume(self, data: Dict[str, Any]) -> None:
        pass

    def do_consume(self, consume_func: Callable[[List[Dict[str, Any]]], None],
                   events: List[Dict[str, Any]]) -> None:
        try:
            time_start = time.time()
            consume_func(events)
            consume_time_seconds: Optional[float] = time.time() - time_start
            self.consumed_since_last_emptied += len(events)
        except Exception:
            self._handle_consume_exception(events)
            consume_time_seconds = None
        finally:
            flush_per_request_caches()
            reset_queries()

            if consume_time_seconds is not None:
                self.recent_consume_times.append(
                    (len(events), consume_time_seconds))

            if self.q is not None:
                remaining_queue_size = self.q.queue_size()
            else:
                remaining_queue_size = 0

            if remaining_queue_size == 0:
                self.queue_last_emptied_timestamp = time.time()
                self.consumed_since_last_emptied = 0

            self.consume_interation_counter += 1
            if self.consume_interation_counter >= self.CONSUME_ITERATIONS_BEFORE_UPDATE_STATS_NUM:

                self.consume_interation_counter = 0
                self.update_statistics(remaining_queue_size)

    def consume_wrapper(self, data: Dict[str, Any]) -> None:
        consume_func = lambda events: self.consume(events[0])
        self.do_consume(consume_func, [data])

    def _handle_consume_exception(self, events: List[Dict[str, Any]]) -> None:
        self._log_problem()
        if not os.path.exists(settings.QUEUE_ERROR_DIR):
            os.mkdir(settings.QUEUE_ERROR_DIR)  # nocoverage
        fname = '%s.errors' % (self.queue_name, )
        fn = os.path.join(settings.QUEUE_ERROR_DIR, fname)
        line = '%s\t%s\n' % (time.asctime(), ujson.dumps(events))
        lock_fn = fn + '.lock'
        with lockfile(lock_fn):
            with open(fn, 'ab') as f:
                f.write(line.encode('utf-8'))
        check_and_send_restart_signal()

    def _log_problem(self) -> None:
        logging.exception("Problem handling data on queue %s" %
                          (self.queue_name, ))

    def setup(self) -> None:
        self.q = SimpleQueueClient()

    def start(self) -> None:
        self.initialize_statistics()
        self.q.register_json_consumer(self.queue_name, self.consume_wrapper)
        self.q.start_consuming()

    def stop(self) -> None:  # nocoverage
        self.q.stop_consuming()
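
The update_statistics method above uses the common write-then-rename idiom so that readers of the stats file never observe a partially written document. A minimal standalone sketch (file name and contents are illustrative):

import json
import os

def write_stats_atomically(path: str, stats: dict) -> None:
    tmp_path = path + '.tmp'
    with open(tmp_path, 'w') as f:
        f.write(json.dumps(stats, indent=2) + '\n')
    os.rename(tmp_path, path)  # atomic on POSIX when both paths are on the same filesystem

write_stats_atomically('example_queue.stats', {'current_queue_size': 0})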
Code example #30
 def setup(self) -> None:
     self.q = SimpleQueueClient(prefetch=self.PREFETCH)
Code example #31
def main(options):
    # type: (Any) -> int

    # yarn and management commands expect to be run from the root of the
    # project.
    os.chdir(ZULIP_PATH)

    # hash the apt dependencies
    sha_sum = hashlib.sha1()

    for apt_depedency in SYSTEM_DEPENDENCIES:
        sha_sum.update(apt_depedency.encode('utf8'))
    if vendor in ["Ubuntu", "Debian"]:
        sha_sum.update(open('scripts/lib/setup-apt-repo', 'rb').read())
    else:
        # hash the content of setup-yum-repo and build-*
        sha_sum.update(open('scripts/lib/setup-yum-repo', 'rb').read())
        build_paths = glob.glob("scripts/lib/build-*")
        for bp in build_paths:
            sha_sum.update(open(bp, 'rb').read())

    new_apt_dependencies_hash = sha_sum.hexdigest()
    last_apt_dependencies_hash = None
    apt_hash_file_path = os.path.join(UUID_VAR_PATH, "apt_dependencies_hash")
    with open(apt_hash_file_path, 'a+') as hash_file:
        hash_file.seek(0)
        last_apt_dependencies_hash = hash_file.read()

    if (new_apt_dependencies_hash != last_apt_dependencies_hash):
        try:
            install_system_deps()
        except subprocess.CalledProcessError:
            # Might be a failure due to network connection issues. Retrying...
            install_system_deps(retry=True)
        with open(apt_hash_file_path, 'w') as hash_file:
            hash_file.write(new_apt_dependencies_hash)
    else:
        print("No changes to apt dependencies, so skipping apt operations.")

    # Here we install node.
    proxy_env = [
        "env",
        "http_proxy=" + os.environ.get("http_proxy", ""),
        "https_proxy=" + os.environ.get("https_proxy", ""),
        "no_proxy=" + os.environ.get("no_proxy", ""),
    ]
    run_as_root(proxy_env + ["scripts/lib/install-node"], sudo_args = ['-H'])

    # This is a wrapper around `yarn`, which we run last since
    # it can often fail due to network issues beyond our control.
    try:
        # Hack: We remove `node_modules` as root to work around an
        # issue with the symlinks being improperly owned by root.
        if os.path.islink("node_modules"):
            run_as_root(["rm", "-f", "node_modules"])
        run_as_root(["mkdir", "-p", NODE_MODULES_CACHE_PATH])
        run_as_root(["chown", "%s:%s" % (user_id, user_id), NODE_MODULES_CACHE_PATH])
        setup_node_modules(prefer_offline=True)
    except subprocess.CalledProcessError:
        print(WARNING + "`yarn install` failed; retrying..." + ENDC)
        setup_node_modules()

    # Install shellcheck.
    run_as_root(["scripts/lib/install-shellcheck"])

    from tools.setup import setup_venvs
    setup_venvs.main()

    activate_this = "/srv/zulip-py3-venv/bin/activate_this.py"
    exec(open(activate_this).read(), {}, dict(__file__=activate_this))

    setup_shell_profile('~/.bash_profile')
    setup_shell_profile('~/.zprofile')

    # This needs to happen before anything that imports zproject.settings.
    run(["scripts/setup/generate_secrets.py", "--development"])

    run_as_root(["cp", REPO_STOPWORDS_PATH, TSEARCH_STOPWORDS_PATH])

    # create log directory `zulip/var/log`
    os.makedirs(LOG_DIR_PATH, exist_ok=True)
    # create upload directory `var/uploads`
    os.makedirs(UPLOAD_DIR_PATH, exist_ok=True)
    # create test upload directory `var/test_upload`
    os.makedirs(TEST_UPLOAD_DIR_PATH, exist_ok=True)
    # create coverage directory `var/coverage`
    os.makedirs(COVERAGE_DIR_PATH, exist_ok=True)
    # create linecoverage directory `var/node-coverage`
    os.makedirs(NODE_TEST_COVERAGE_DIR_PATH, exist_ok=True)

    # The `build_emoji` script requires `emoji-datasource` package
    # which we install via npm; thus this step is after installing npm
    # packages.
    if not os.path.isdir(EMOJI_CACHE_PATH):
        run_as_root(["mkdir", EMOJI_CACHE_PATH])
    run_as_root(["chown", "%s:%s" % (user_id, user_id), EMOJI_CACHE_PATH])
    run(["tools/setup/emoji/build_emoji"])

    # copy over static files from the zulip_bots package
    generate_zulip_bots_static_files()

    webfont_paths = ["tools/setup/generate-custom-icon-webfont", "static/icons/fonts/template.hbs"]
    webfont_paths += glob.glob('static/assets/icons/*')
    if file_or_package_hash_updated(webfont_paths, "webfont_files_hash", options.is_force):
        run(["tools/setup/generate-custom-icon-webfont"])
    else:
        print("No need to run `tools/setup/generate-custom-icon-webfont`.")

    build_pygments_data_paths = ["tools/setup/build_pygments_data", "tools/setup/lang.json"]
    from pygments import __version__ as pygments_version
    if file_or_package_hash_updated(build_pygments_data_paths, "build_pygments_data_hash", options.is_force,
                                    [pygments_version]):
        run(["tools/setup/build_pygments_data"])
    else:
        print("No need to run `tools/setup/build_pygments_data`.")

    update_authors_json_paths = ["tools/update-authors-json", "zerver/tests/fixtures/authors.json"]
    if file_or_package_hash_updated(update_authors_json_paths, "update_authors_json_hash", options.is_force):
        run(["tools/update-authors-json", "--use-fixture"])
    else:
        print("No need to run `tools/update-authors-json`.")

    email_source_paths = ["tools/inline-email-css", "templates/zerver/emails/email.css"]
    email_source_paths += glob.glob('templates/zerver/emails/*.source.html')
    if file_or_package_hash_updated(email_source_paths, "last_email_source_files_hash", options.is_force):
        run(["tools/inline-email-css"])
    else:
        print("No need to run `tools/inline-email-css`.")

    if is_circleci or (is_travis and not options.is_production_travis):
        run_as_root(["service", "rabbitmq-server", "restart"])
        run_as_root(["service", "redis-server", "restart"])
        run_as_root(["service", "memcached", "restart"])
        run_as_root(["service", "postgresql", "restart"])
    elif family == 'redhat':
        for service in ["postgresql-%s" % (POSTGRES_VERSION,), "rabbitmq-server", "memcached", "redis"]:
            run_as_root(["systemctl", "enable", service], sudo_args = ['-H'])
            run_as_root(["systemctl", "start", service], sudo_args = ['-H'])
    elif options.is_docker:
        run_as_root(["service", "rabbitmq-server", "restart"])
        run_as_root(["pg_dropcluster", "--stop", POSTGRES_VERSION, "main"])
        run_as_root(["pg_createcluster", "-e", "utf8", "--start", POSTGRES_VERSION, "main"])
        run_as_root(["service", "redis-server", "restart"])
        run_as_root(["service", "memcached", "restart"])
    if not options.is_production_travis:
        # The following block is skipped for the production Travis
        # suite, because that suite doesn't make use of these elements
        # of the development environment (it just uses the development
        # environment to build a release tarball).

        # Need to set up Django before using template_database_status
        os.environ.setdefault("DJANGO_SETTINGS_MODULE", "zproject.settings")
        import django
        django.setup()

        from zerver.lib.test_fixtures import template_database_status, run_db_migrations

        try:
            from zerver.lib.queue import SimpleQueueClient
            SimpleQueueClient()
            rabbitmq_is_configured = True
        except Exception:
            rabbitmq_is_configured = False

        if options.is_force or not rabbitmq_is_configured:
            run(["scripts/setup/configure-rabbitmq"])
        else:
            print("RabbitMQ is already configured.")

        migration_status_path = os.path.join(UUID_VAR_PATH, "migration_status_dev")
        dev_template_db_status = template_database_status(
            migration_status=migration_status_path,
            settings="zproject.settings",
            database_name="zulip",
        )
        if options.is_force or dev_template_db_status == 'needs_rebuild':
            run(["tools/setup/postgres-init-dev-db"])
            run(["tools/do-destroy-rebuild-database"])
        elif dev_template_db_status == 'run_migrations':
            run_db_migrations('dev')
        elif dev_template_db_status == 'current':
            print("No need to regenerate the dev DB.")

        test_template_db_status = template_database_status()
        if options.is_force or test_template_db_status == 'needs_rebuild':
            run(["tools/setup/postgres-init-test-db"])
            run(["tools/do-destroy-rebuild-test-database"])
        elif test_template_db_status == 'run_migrations':
            run_db_migrations('test')
        elif test_template_db_status == 'current':
            print("No need to regenerate the test DB.")

        # Consider updating generated translations data: both `.mo`
        # files and `language-options.json`.
        paths = ['zerver/management/commands/compilemessages.py']
        paths += glob.glob('static/locale/*/LC_MESSAGES/*.po')
        paths += glob.glob('static/locale/*/translations.json')

        if file_or_package_hash_updated(paths, "last_compilemessages_hash", options.is_force):
            run(["./manage.py", "compilemessages"])
        else:
            print("No need to run `manage.py compilemessages`.")

    run(["scripts/lib/clean-unused-caches"])

    version_file = os.path.join(UUID_VAR_PATH, 'provision_version')
    print('writing to %s\n' % (version_file,))
    with open(version_file, 'w') as f:
        f.write(PROVISION_VERSION + '\n')

    print()
    print(OKBLUE + "Zulip development environment setup succeeded!" + ENDC)
    return 0
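
The hash-gated steps above all follow the same caching idiom: hash the relevant input files (plus any package versions), compare the digest against the one recorded by the previous provision run, and skip the rebuild when nothing changed. A minimal sketch of that idiom follows; the names (_digest, hash_updated) are illustrative, not the actual Zulip helper.

import hashlib
import os
from typing import List, Optional


def _digest(paths: List[str], extra_strings: Optional[List[str]] = None) -> str:
    # Hash the contents of every input file, plus any extra version strings.
    sha = hashlib.sha1()
    for path in sorted(paths):
        with open(path, "rb") as f:
            sha.update(f.read())
    for s in extra_strings or []:
        sha.update(s.encode("utf-8"))
    return sha.hexdigest()


def hash_updated(paths: List[str], hash_name: str, var_dir: str,
                 force: bool = False,
                 extra_strings: Optional[List[str]] = None) -> bool:
    # Return True when the caller should re-run the build step; always record
    # the new digest so an unchanged tree is skipped on the next run.
    hash_path = os.path.join(var_dir, hash_name)
    new_hash = _digest(paths, extra_strings)
    old_hash = None
    if os.path.exists(hash_path):
        with open(hash_path) as f:
            old_hash = f.read().strip()
    with open(hash_path, "w") as f:
        f.write(new_hash)
    return force or new_hash != old_hash
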
Code example #32
class QueueProcessingWorker(ABC):
    queue_name: str
    CONSUME_ITERATIONS_BEFORE_UPDATE_STATS_NUM = 50

    def __init__(self) -> None:
        self.q: Optional[SimpleQueueClient] = None
        if not hasattr(self, "queue_name"):
            raise WorkerDeclarationException("Queue worker declared without queue_name")

        self.initialize_statistics()

    def initialize_statistics(self) -> None:
        self.queue_last_emptied_timestamp = time.time()
        self.consumed_since_last_emptied = 0
        self.recent_consume_times: MutableSequence[Tuple[int, float]] = deque(maxlen=50)
        self.consume_iteration_counter = 0

        self.update_statistics(0)

    def update_statistics(self, remaining_queue_size: int) -> None:
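        # Summarize recent consume timings and write a small JSON stats file
        # (consume rate, queue depth, last-emptied time) for monitoring.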
        total_seconds = sum([seconds for _, seconds in self.recent_consume_times])
        total_events = sum([events_number for events_number, _ in self.recent_consume_times])
        if total_events == 0:
            recent_average_consume_time = None
        else:
            recent_average_consume_time = total_seconds / total_events
        stats_dict = dict(
            update_time=time.time(),
            recent_average_consume_time=recent_average_consume_time,
            current_queue_size=remaining_queue_size,
            queue_last_emptied_timestamp=self.queue_last_emptied_timestamp,
            consumed_since_last_emptied=self.consumed_since_last_emptied,
        )

        os.makedirs(settings.QUEUE_STATS_DIR, exist_ok=True)

        fname = f'{self.queue_name}.stats'
        fn = os.path.join(settings.QUEUE_STATS_DIR, fname)
        with lockfile(fn + '.lock'):
            tmp_fn = fn + '.tmp'
            with open(tmp_fn, 'wb') as f:
                f.write(
                    orjson.dumps(stats_dict, option=orjson.OPT_APPEND_NEWLINE | orjson.OPT_INDENT_2)
                )
            os.rename(tmp_fn, fn)

    @abstractmethod
    def consume(self, data: Dict[str, Any]) -> None:
        pass

    def do_consume(self, consume_func: Callable[[List[Dict[str, Any]]], None],
                   events: List[Dict[str, Any]]) -> None:
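        # Time consume_func over this batch of events; statistics are written
        # to disk every CONSUME_ITERATIONS_BEFORE_UPDATE_STATS_NUM iterations.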
        consume_time_seconds: Optional[float] = None
        try:
            time_start = time.time()
            consume_func(events)
            consume_time_seconds = time.time() - time_start
            self.consumed_since_last_emptied += len(events)
        except Exception:
            self._handle_consume_exception(events)
        finally:
            flush_per_request_caches()
            reset_queries()

            if consume_time_seconds is not None:
                self.recent_consume_times.append((len(events), consume_time_seconds))

            if self.q is not None:
                remaining_queue_size = self.q.queue_size()
            else:
                remaining_queue_size = 0

            if remaining_queue_size == 0:
                self.queue_last_emptied_timestamp = time.time()
                self.consumed_since_last_emptied = 0

            self.consume_iteration_counter += 1
            if self.consume_iteration_counter >= self.CONSUME_ITERATIONS_BEFORE_UPDATE_STATS_NUM:
                self.consume_iteration_counter = 0
                self.update_statistics(remaining_queue_size)

    def consume_wrapper(self, data: Dict[str, Any]) -> None:
        consume_func = lambda events: self.consume(events[0])
        self.do_consume(consume_func, [data])

    def _handle_consume_exception(self, events: List[Dict[str, Any]]) -> None:
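        # Log the failure, append the offending events to a per-queue .errors
        # file for later inspection or replay, and signal a restart check.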
        self._log_problem()
        if not os.path.exists(settings.QUEUE_ERROR_DIR):
            os.mkdir(settings.QUEUE_ERROR_DIR)  # nocoverage
        # Use 'mark_sanitized' to prevent Pysa from detecting this false positive
        # flow. 'queue_name' is always a constant string.
        fname = mark_sanitized(f'{self.queue_name}.errors')
        fn = os.path.join(settings.QUEUE_ERROR_DIR, fname)
        line = f'{time.asctime()}\t{orjson.dumps(events).decode()}\n'
        lock_fn = fn + '.lock'
        with lockfile(lock_fn):
            with open(fn, 'ab') as f:
                f.write(line.encode('utf-8'))
        check_and_send_restart_signal()

    def _log_problem(self) -> None:
        logging.exception("Problem handling data on queue %s", self.queue_name, stack_info=True)

    def setup(self) -> None:
        self.q = SimpleQueueClient()

    def start(self) -> None:
        assert self.q is not None
        self.initialize_statistics()
        self.q.register_json_consumer(self.queue_name, self.consume_wrapper)
        self.q.start_consuming()

    def stop(self) -> None:  # nocoverage
        assert self.q is not None
        self.q.stop_consuming()
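
A minimal sketch of how a concrete worker plugs into this base class (the names below are assumed for illustration and are not part of the Zulip source): declare queue_name, implement consume(), and let setup()/start() drive the consumption loop.

class EchoWorker(QueueProcessingWorker):
    queue_name = "echo"

    def consume(self, data: Dict[str, Any]) -> None:
        # Each dequeued JSON event arrives here as a dict.
        logging.info("Received event on %s: %s", self.queue_name, data)

# Typical lifecycle, assuming a reachable RabbitMQ server:
#   worker = EchoWorker()
#   worker.setup()   # opens the SimpleQueueClient connection
#   worker.start()   # blocks, feeding each event through consume_wrapper()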
Code example #33
File: queue_processors.py Project: AmoliShah/zulip
 def setup(self):
     # type: () -> None
     self.q = SimpleQueueClient()
Code example #34
File: purge_queue.py Project: nskillen/zulip
 def handle(self, *args, **options):
     queue_name = options["queue_name"]
     queue = SimpleQueueClient()
     queue.ensure_queue(queue_name, lambda: None)
     queue.channel.queue_purge(queue_name)
     print "Done"