Example 1
def init_worker(counter: "multiprocessing.sharedctypes._Value") -> None:
    """
    This function runs only in parallel mode. It initializes the
    individual processes, which are also called workers.
    """
    global _worker_id

    with counter.get_lock():
        counter.value += 1
        _worker_id = counter.value
    """
    You can now use _worker_id.
    """

    # Clear the cache
    from zerver.lib.cache import get_cache_backend
    cache = get_cache_backend(None)
    cache.clear()

    # Close all connections
    connections.close_all()

    destroy_test_databases(_worker_id)
    create_test_databases(_worker_id)
    initialize_worker_path(_worker_id)

    # We manually update the upload directory path in the URL regex.
    from zproject.dev_urls import avatars_url
    new_root = os.path.join(settings.LOCAL_UPLOADS_DIR, "avatars")
    avatars_url.default_args['document_root'] = new_root
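
For context, the counter/lock pattern above can be exercised on its own. Below is a minimal, self-contained sketch (not Zulip's actual runner wiring) of how an initializer like init_worker hands each pool process a unique _worker_id via a shared multiprocessing.Value:

import multiprocessing

_worker_id = 0

def _init(counter: "multiprocessing.sharedctypes.Synchronized") -> None:
    # Same pattern as init_worker above: claim a unique id under the shared lock.
    global _worker_id
    with counter.get_lock():
        counter.value += 1
        _worker_id = counter.value

def _report(_: int) -> int:
    # Runs in a worker process; returns the id claimed during initialization.
    return _worker_id

if __name__ == "__main__":
    counter = multiprocessing.Value("i", 0)
    with multiprocessing.Pool(processes=4, initializer=_init, initargs=(counter,)) as pool:
        print(sorted(set(pool.map(_report, range(100)))))  # e.g. [1, 2, 3, 4]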
Example 2
@contextmanager
def queries_captured(
    include_savepoints: bool = False,
    keep_cache_warm: bool = False
) -> Generator[List[Dict[str, Union[str, bytes]]], None, None]:
    """
    Allow a user to capture just the queries executed during
    the with statement.
    """

    queries: List[Dict[str, Union[str, bytes]]] = []

    def wrapper_execute(
        self: TimeTrackingCursor,
        action: Callable[[Query, ParamsT], None],
        sql: Query,
        params: ParamsT,
    ) -> None:
        start = time.time()
        try:
            return action(sql, params)
        finally:
            stop = time.time()
            duration = stop - start
            if include_savepoints or not isinstance(
                    sql, str) or "SAVEPOINT" not in sql:
                queries.append({
                    "sql": self.mogrify(sql, params).decode(),
                    "time": f"{duration:.3f}",
                })

    def cursor_execute(self: TimeTrackingCursor,
                       sql: Query,
                       params: Optional[Params] = None) -> None:
        return wrapper_execute(self,
                               super(TimeTrackingCursor, self).execute, sql,
                               params)

    def cursor_executemany(self: TimeTrackingCursor, sql: Query,
                           params: Iterable[Params]) -> None:
        return wrapper_execute(
            self,
            super(TimeTrackingCursor, self).executemany, sql,
            params)  # nocoverage -- doesn't actually get used in tests

    if not keep_cache_warm:
        cache = get_cache_backend(None)
        cache.clear()
    with mock.patch.multiple(TimeTrackingCursor,
                             execute=cursor_execute,
                             executemany=cursor_executemany):
        yield queries
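
Typical usage is as a context manager around the code whose database traffic you want to count. A hedged sketch follows; the test class and the code under test are illustrative, not taken from the snippet above:

from django.test import TestCase

class ExampleQueryCountTest(TestCase):
    def test_query_count(self) -> None:
        with queries_captured() as queries:
            do_something_with_the_database()  # hypothetical code under test
        # Each captured entry holds the mogrified SQL and its wall-clock duration.
        self.assertEqual(len(queries), 2)  # expected count is illustrative
        for query in queries:
            self.assertNotIn("SAVEPOINT", query["sql"])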
Example 3
def init_worker(counter):
    # type: (Synchronized) -> None
    """
    This function runs only in parallel mode. It initializes the
    individual processes, which are also called workers.
    """
    global _worker_id

    with counter.get_lock():
        counter.value += 1
        _worker_id = counter.value

    """
    You can now use _worker_id.
    """

    test_classes.API_KEYS = {}

    # Clear the cache
    from zerver.lib.cache import get_cache_backend
    cache = get_cache_backend(None)
    cache.clear()

    # Close all connections
    connections.close_all()

    destroy_test_databases(_worker_id)
    create_test_databases(_worker_id)

    # Every process should upload to a separate directory so that
    # race conditions can be avoided.
    settings.LOCAL_UPLOADS_DIR = '{}_{}'.format(settings.LOCAL_UPLOADS_DIR,
                                                _worker_id)

    def is_upload_avatar_url(url):
        # type: (RegexURLPattern) -> bool
        if url.regex.pattern == r'^user_avatars/(?P<path>.*)$':
            return True
        return False

    # We manually update the upload directory path in the url regex.
    from zproject import dev_urls
    found = False
    for url in dev_urls.urls:
        if is_upload_avatar_url(url):
            found = True
            new_root = os.path.join(settings.LOCAL_UPLOADS_DIR, "avatars")
            url.default_args['document_root'] = new_root

    if not found:
        print("*** Upload directory not found.")
Example 4
def wrapper_execute(self, action, sql, params=()):
    # type: (TimeTrackingCursor, Callable, NonBinaryStr, Iterable[Any]) -> None
    cache = get_cache_backend(None)
    cache.clear()
    start = time.time()
    try:
        return action(sql, params)
    finally:
        stop = time.time()
        duration = stop - start
        if include_savepoints or ('SAVEPOINT' not in sql):
            queries.append({
                'sql': self.mogrify(sql, params).decode('utf-8'),
                'time': "%.3f" % duration,
            })
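
Here queries and include_savepoints come from the enclosing queries_captured (see Example 2); mogrify is what renders the parametrized statement into the literal SQL string that gets recorded. A hedged psycopg2 sketch of that step (the connection string and table name are placeholders):

import psycopg2

conn = psycopg2.connect("dbname=zulip_test")  # placeholder connection string
cur = conn.cursor()
# mogrify binds the parameters client-side and returns bytes, hence the .decode().
rendered = cur.mogrify("SELECT * FROM zerver_userprofile WHERE id = %s", (42,))
print(rendered.decode("utf-8"))  # SELECT * FROM zerver_userprofile WHERE id = 42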
Example 5
def wrapper_execute(self: TimeTrackingCursor,
                    action: Callable[[str, Iterable[Any]], None],
                    sql: str,
                    params: Iterable[Any] = ()) -> None:
    cache = get_cache_backend(None)
    cache.clear()
    start = time.time()
    try:
        return action(sql, params)
    finally:
        stop = time.time()
        duration = stop - start
        if include_savepoints or ('SAVEPOINT' not in sql):
            queries.append({
                'sql': self.mogrify(sql, params).decode('utf-8'),
                'time': "%.3f" % (duration, ),
            })
Example 6
def wrapper_execute(self: TimeTrackingCursor,
                    action: Callable[[str, ParamsT], None],
                    sql: Query,
                    params: ParamsT) -> None:
    cache = get_cache_backend(None)
    cache.clear()
    start = time.time()
    try:
        return action(sql, params)
    finally:
        stop = time.time()
        duration = stop - start
        if include_savepoints or not isinstance(sql, str) or 'SAVEPOINT' not in sql:
            queries.append({
                'sql': self.mogrify(sql, params).decode('utf-8'),
                'time': f"{duration:.3f}",
            })
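
The extra isinstance check in this version exists because sql is typed as Query rather than plain str, so the SAVEPOINT substring test is only safe on actual strings. A hedged sketch of what such type aliases typically look like (the real definitions, e.g. in zerver/lib/db.py, may differ):

from typing import Any, Mapping, Sequence, Union

from psycopg2.sql import Composable

# Plausible shape of the aliases used above; treat these as assumptions.
Query = Union[str, bytes, Composable]
ParamsT = Union[Sequence[Any], Mapping[str, Any]]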
Example 7
def init_worker(counter):
    # type: (Synchronized) -> None
    global _worker_id
    test_classes.API_KEYS = {}

    # Clear the cache
    from zerver.lib.cache import get_cache_backend
    cache = get_cache_backend(None)
    cache.clear()

    # Close all connections
    connections.close_all()

    with counter.get_lock():
        counter.value += 1
        _worker_id = counter.value

    destroy_test_databases(_worker_id)
    create_test_databases(_worker_id)
Example 8
def init_worker(counter):
    # type: (Synchronized) -> None
    global _worker_id
    test_classes.API_KEYS = {}

    # Clear the cache
    from zerver.lib.cache import get_cache_backend
    cache = get_cache_backend(None)
    cache.clear()

    # Close all connections
    connections.close_all()

    with counter.get_lock():
        counter.value += 1
        _worker_id = counter.value

    for alias in connections:
        connection = connections[alias]

        try:
            connection.creation.destroy_test_db(number=_worker_id)
        except Exception:
            # DB doesn't exist. No need to do anything.
            pass

        connection.creation.clone_test_db(
            number=_worker_id,
            keepdb=True,
        )

        settings_dict = connection.creation.get_test_db_clone_settings(
            _worker_id)
        # connection.settings_dict must be updated in place for changes to be
        # reflected in django.db.connections. If the following line assigned
        # connection.settings_dict = settings_dict, new threads would connect
        # to the default database instead of the appropriate clone.
        connection.settings_dict.update(settings_dict)
        connection.close()
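
Other variants above call destroy_test_databases and create_test_databases instead of inlining this loop. A hedged sketch of how the loop factors into such helpers (the real helpers may differ in signature and error handling):

def destroy_test_databases(worker_id: int) -> None:
    for alias in connections:
        connection = connections[alias]
        try:
            connection.creation.destroy_test_db(number=worker_id)
        except Exception:
            # The per-worker database does not exist yet; nothing to tear down.
            pass

def create_test_databases(worker_id: int) -> None:
    for alias in connections:
        connection = connections[alias]
        connection.creation.clone_test_db(number=worker_id, keepdb=True)
        settings_dict = connection.creation.get_test_db_clone_settings(worker_id)
        # Update in place so django.db.connections picks up the clone's settings
        # (see the comment in the loop above).
        connection.settings_dict.update(settings_dict)
        connection.close()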
Example 9
def init_worker(counter):
    # type: (Synchronized) -> None
    """
    This function runs only in parallel mode. It initializes the
    individual processes, which are also called workers.
    """
    global _worker_id
    test_classes.API_KEYS = {}

    # Clear the cache
    from zerver.lib.cache import get_cache_backend
    cache = get_cache_backend(None)
    cache.clear()

    # Close all connections
    connections.close_all()

    with counter.get_lock():
        counter.value += 1
        _worker_id = counter.value

    destroy_test_databases(_worker_id)
    create_test_databases(_worker_id)
Example 10
def remove_caches(request: HttpRequest) -> HttpResponse:
    cache = get_cache_backend(None)
    cache.clear()
    clear_client_cache()
    flush_per_request_caches()
    return json_success()
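
A hedged sketch of how such a development-only endpoint might be wired up; the path and URL module below are illustrative, not taken from the snippet above:

from django.conf.urls import url

urlpatterns = [
    # Flush memcached, the in-process caches, and the per-request caches
    # with a single request, e.g. between test-suite runs.
    url(r'^api/v1/remove_caches$', remove_caches),
]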