Example #1
    def test_cache_memoize_large_files(self):
        """Testing cache_memoize with large files"""
        cacheKey = "abc123"

        # This takes into account the size of the pickle data, and will
        # get us to exactly 2 chunks of data in cache.
        data = 'x' * (CACHE_CHUNK_SIZE * 2 - 8)

        def cacheFunc(cacheCalled=[]):
            self.assertTrue(not cacheCalled)
            cacheCalled.append(True)
            return data

        result = cache_memoize(cacheKey, cacheFunc, large_data=True,
                               compress_large_data=False)
        self.assertEqual(result, data)

        self.assertTrue(make_cache_key(cacheKey) in cache)
        self.assertTrue(make_cache_key('%s-0' % cacheKey) in cache)
        self.assertTrue(make_cache_key('%s-1' % cacheKey) in cache)
        self.assertFalse(make_cache_key('%s-2' % cacheKey) in cache)

        result = cache_memoize(cacheKey, cacheFunc, large_data=True,
                               compress_large_data=False)
        self.assertEqual(result, data)
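
The - 8 in the test above is doing real work: under Python 2, pickling a plain str at protocol 0 adds exactly eight bytes of framing, so the pickled payload fills two cache chunks with nothing left over. A standalone sketch of that arithmetic (the chunk size here is a stand-in; the real constant is CACHE_CHUNK_SIZE from djblets.cache.backend):

    import pickle

    CACHE_CHUNK_SIZE = 2 ** 20  # stand-in value for this sketch only

    data = 'x' * (CACHE_CHUNK_SIZE * 2 - 8)

    # On Python 2, protocol-0 pickling wraps a plain str as "S'...'\np0\n.",
    # which is 8 bytes of framing, so the payload lands on exactly two
    # chunks. (Python 3 frames str differently, so the exact -8 offset is
    # specific to the era this test was written in.)
    pickled = pickle.dumps(data, protocol=0)
    print(len(pickled) == CACHE_CHUNK_SIZE * 2)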
Example #2
    def test_cache_memoize_large_files_compressed(self):
        """Testing cache_memoize with large files with compression"""
        cache_key = 'abc123'

        data, pickled_data = self._build_test_chunk_data(num_chunks=2)

        def cache_func():
            return data

        self.spy_on(cache_func, call_original=True)

        result = cache_memoize(cache_key, cache_func, large_data=True,
                               compress_large_data=True)
        self.assertTrue(cache_func.spy.called)

        cache_key_0 = make_cache_key('%s-0' % cache_key)

        self.assertTrue(make_cache_key(cache_key) in cache)
        self.assertTrue(cache_key_0 in cache)
        self.assertFalse(make_cache_key('%s-1' % cache_key) in cache)
        self.assertFalse(make_cache_key('%s-2' % cache_key) in cache)

        # Verify the contents of the stored data.
        stored_data = cache.get(cache_key_0)[0]
        self.assertEqual(stored_data, zlib.compress(pickled_data))

        # Try fetching the data we stored.
        cache_func.spy.reset_calls()
        result = cache_memoize(cache_key, cache_func, large_data=True,
                               compress_large_data=True)
        self.assertEqual(result, data)
        self.assertFalse(cache_func.spy.called)
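
The assertion on the stored chunk documents the compressed storage format: the value is pickled, zlib-compressed, and only then chunked, which is why two chunks' worth of repetitive data collapses into a single "-0" key. A minimal, standalone round-trip of that format (the payload is a stand-in for _build_test_chunk_data()):

    import pickle
    import zlib

    data = 'x' * 10000  # highly compressible stand-in payload

    stored = zlib.compress(pickle.dumps(data, protocol=0))
    assert pickle.loads(zlib.decompress(stored)) == data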
Example #3
    def get_file_exists(self,
                        path,
                        revision,
                        base_commit_id=None,
                        request=None):
        """Returns whether or not a file exists in the repository.

        If the repository is backed by a hosting service, this will go
        through that. Otherwise, it will attempt to directly access the
        repository.

        The result of this call will be cached, making future lookups
        of this path and revision on this repository faster.
        """
        key = self._make_file_exists_cache_key(path, revision, base_commit_id)

        if cache.get(make_cache_key(key)) == '1':
            return True

        exists = self._get_file_exists_uncached(path, revision, base_commit_id,
                                                request)

        if exists:
            cache_memoize(key, lambda: '1')

        return exists
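
Note the asymmetry: only a positive result is written back, so a missing file is re-checked on every call and noticed as soon as it appears, while a known-good path costs one cache read. The same pattern extracted into a generic helper (hypothetical; assumes a configured Django cache):

    from django.core.cache import cache
    from djblets.cache.backend import cache_memoize, make_cache_key

    def memoized_predicate(key, predicate):
        """Cache only positive answers from predicate() under key."""
        if cache.get(make_cache_key(key)) == '1':
            return True

        result = predicate()

        if result:
            # Store a small sentinel string. Negative answers stay
            # uncached, so they can flip to True later without any
            # explicit invalidation.
            cache_memoize(key, lambda: '1')

        return result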
Example #4
    def test_cache_memoize_large_files(self):
        """Testing cache_memoize with large files"""
        cacheKey = "abc123"

        # This takes into account the size of the pickle data, and will
        # get us to exactly 2 chunks of data in cache.
        data = 'x' * (CACHE_CHUNK_SIZE * 2 - 8)

        def cacheFunc(cacheCalled=[]):
            self.assertTrue(not cacheCalled)
            cacheCalled.append(True)
            return data

        result = cache_memoize(cacheKey,
                               cacheFunc,
                               large_data=True,
                               compress_large_data=False)
        self.assertEqual(result, data)

        self.assertTrue(make_cache_key(cacheKey) in cache)
        self.assertTrue(make_cache_key('%s-0' % cacheKey) in cache)
        self.assertTrue(make_cache_key('%s-1' % cacheKey) in cache)
        self.assertFalse(make_cache_key('%s-2' % cacheKey) in cache)

        result = cache_memoize(cacheKey,
                               cacheFunc,
                               large_data=True,
                               compress_large_data=False)
        self.assertEqual(result, data)
Example #5
def _is_admin_for_user(self, user):
    """Return whether or not this user is an administrator for the given user.

    Results will be cached for this user so that at most one query is done.

    Args:
        user (django.contrib.auth.models.User):
            The user to check.

    Returns:
        bool:
        Whether or not this user is an administrator for the given user.
    """
    if self.is_staff:
        return True

    if not user or user.is_anonymous():
        return False

    if not hasattr(self, '_cached_admin_for_users'):
        self._cached_admin_for_users = cache_memoize(
            '%s-admin-for-users' % self.pk, lambda: tuple(
                User.objects.filter(local_site__admins=self).values_list(
                    'pk', flat=True)))

    return user.pk in self._cached_admin_for_users
Example #6
    def test_cache_memoize_large_files_missing_chunk(self):
        """Testing cache_memoize with loading large files with missing chunks
        """
        cache_key = 'abc123'

        # This takes into account the size of the pickle data, and will
        # get us to exactly 2 chunks of data in cache.
        data, pickled_data = self._build_test_chunk_data(num_chunks=2)

        cache.set(make_cache_key(cache_key), '2')
        cache.set(make_cache_key('%s-0' % cache_key),
                  [pickled_data[:CACHE_CHUNK_SIZE]])

        def cache_func():
            return data

        self.spy_on(cache_func, call_original=True)

        result = cache_memoize(cache_key,
                               cache_func,
                               large_data=True,
                               compress_large_data=False)
        self.assertEqual(len(result), len(data))
        self.assertEqual(result, data)
        self.assertTrue(cache_func.spy.called)
Example #7
    def test_cache_memoize_with_unicode_data(self):
        """Testing cache_memoize with Unicode data"""
        cache_key = 'abc123'
        test_str = '🙃' * 5

        def cache_func(cache_called=[]):
            self.assertFalse(cache_called)
            cache_called.append(True)
            return test_str

        result = cache_memoize(cache_key, cache_func)
        self.assertEqual(result, test_str)

        # Call a second time. We should only call cache_func once.
        result = cache_memoize(cache_key, cache_func)
        self.assertEqual(result, test_str)
Example #8
def _is_admin_for_user(self, user):
    """Return whether or not this user is an administrator for the given user.

    Results will be cached for this user so that at most one query is done.

    Args:
        user (django.contrib.auth.models.User):
            The user to check.

    Returns:
        bool:
        Whether or not this user is an administrator for the given user.
    """
    if self.is_staff:
        return True

    if not user or user.is_anonymous():
        return False

    if not hasattr(self, '_cached_admin_for_users'):
        self._cached_admin_for_users = cache_memoize(
            '%s-admin-for-users' % self.pk,
            lambda: tuple(
                User.objects
                .filter(local_site__admins=self)
                .values_list('pk', flat=True)
            ))

    return user.pk in self._cached_admin_for_users
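
Two layers of memoization are stacked in both variants above: the _cached_admin_for_users attribute caches on the instance, so repeat calls during one request cost nothing, while cache_memoize shares the tuple of IDs across requests. A stripped-down sketch of that shape (the class, key, and stand-in query are hypothetical):

    from djblets.cache.backend import cache_memoize

    class UserProfile(object):
        pk = 42  # hypothetical primary key

        def admin_for_user_pks(self):
            # Layer 1: per-instance attribute, computed at most once
            # per object lifetime.
            if not hasattr(self, '_cached_admin_pks'):
                # Layer 2: shared cache. A tuple is stored because it
                # pickles cheaply, unlike a lazy QuerySet.
                self._cached_admin_pks = cache_memoize(
                    '%s-admin-for-users' % self.pk,
                    lambda: (1, 2, 3))  # stands in for the real query

            return self._cached_admin_pks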
Example #9
    def _get_api_version(self, hosting_url):
        """Return the version of the API supported by the given server.

        This method will cache the result.

        Args:
            hosting_url (unicode):
                The URL of the GitLab server.

        Returns:
            unicode:
            The version of the API as a string.

            It is returned as a string because
            :py:func:`djblets.cache.backend.cache_memoize` does not work on
            integer results.
        """
        headers = {}

        if self.account.data and 'private_token' in self.account.data:
            headers['PRIVATE-TOKEN'] = decrypt_password(
                self.account.data['private_token']).encode('utf-8')

        return cache_memoize('gitlab-api-version:%s' % hosting_url,
                             expiration=3600,
                             lookup_callable=lambda: self._try_api_versions(
                                 hosting_url,
                                 headers=headers,
                                 path='/projects?per_page=1',
                             )[0])
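
Two details worth copying here: expiration=3600 bounds how stale the probed version can get, and the result is kept as a string because, per the docstring, this era's cache_memoize did not handle integer results. Converting at the boundary keeps callers honest (hypothetical probe function; configured Django cache assumed):

    from djblets.cache.backend import cache_memoize

    def probe_api_version():
        return 4  # hypothetical: imagine this came from an HTTP probe

    version = int(cache_memoize('example-api-version',
                                lambda: '%d' % probe_api_version(),
                                expiration=3600))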
Example #10
    def get_file(self, path, revision, base_commit_id=None, request=None):
        """Returns a file from the repository.

        This will attempt to retrieve the file from the repository. If the
        repository is backed by a hosting service, it will go through that.
        Otherwise, it will attempt to directly access the repository.
        """
        # We wrap the result of get_file in a list and then return the first
        # element after getting the result from the cache. This prevents the
        # cache backend from converting to unicode, since we're no longer
        # passing in a string and the cache backend doesn't recursively look
        # through the list in order to convert the elements inside.
        #
        # Basically, this fixes the massive regressions introduced by the
        # Django unicode changes.
        if not isinstance(path, six.text_type):
            raise TypeError('"path" must be a Unicode string, not %s' %
                            type(path))

        if not isinstance(revision, six.text_type):
            raise TypeError('"revision" must be a Unicode string, not %s' %
                            type(revision))

        if (base_commit_id is not None
                and not isinstance(base_commit_id, six.text_type)):
            raise TypeError('"base_commit_id" must be a Unicode string, '
                            'not %s' % type(base_commit_id))

        return cache_memoize(
            self._make_file_cache_key(path, revision, base_commit_id),
            lambda:
            [self._get_file_uncached(path, revision, base_commit_id, request)],
            large_data=True)[0]
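
The list wrapper is the whole trick: the cache backend of that era coerced bare strings to unicode on the way out but did not recurse into containers, so boxing the bytes in a one-element list and unboxing with [0] round-trips file blobs unmangled. In its smallest form (hypothetical fetch function; configured Django cache assumed):

    from djblets.cache.backend import cache_memoize

    def fetch_blob():
        return b'\x89PNG\r\n...'  # hypothetical binary file contents

    # Box the bytes so the cache layer cannot coerce them, then unbox.
    blob = cache_memoize('file-blob-key', lambda: [fetch_blob()],
                         large_data=True)[0]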
Example #11
    def get_commits(self, start=None):
        """Returns a list of commits.

        This is paginated via the 'start' parameter. Any exceptions are
        expected to be handled by the caller.
        """
        hosting_service = self.hosting_service

        cache_key = make_cache_key('repository-commits:%s:%s' %
                                   (self.pk, start))
        if hosting_service:
            commits_callable = lambda: hosting_service.get_commits(self, start)
        else:
            commits_callable = lambda: self.get_scmtool().get_commits(start)

        # We cache both the entire list for 'start', as well as each individual
        # commit. This allows us to reduce API load when people are looking at
        # the "new review request" page more frequently than they're pushing
        # code, and will usually save 1 API request when they go to actually
        # create a new review request.
        commits = cache_memoize(cache_key, commits_callable)

        for commit in commits:
            cache.set(self.get_commit_cache_key(commit.id), commit,
                      self.COMMITS_CACHE_PERIOD)

        return commits
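
Beyond memoizing the whole page of commits, each commit is also written to its own key, so a later single-commit lookup (typically when the review request is actually created) skips the API entirely. The double-write pattern in isolation (hypothetical key scheme and TTL):

    from django.core.cache import cache
    from djblets.cache.backend import cache_memoize, make_cache_key

    COMMITS_CACHE_PERIOD = 300  # hypothetical per-commit TTL, in seconds

    def get_commits_cached(repo_pk, start, fetch_page):
        """Cache the page of commits and each commit individually."""
        cache_key = make_cache_key('repository-commits:%s:%s'
                                   % (repo_pk, start))
        commits = cache_memoize(cache_key, fetch_page)

        for commit in commits:
            cache.set('repository-commit:%s:%s' % (repo_pk, commit['id']),
                      commit, COMMITS_CACHE_PERIOD)

        return commits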
Example #12
    def _get_api_version(self, hosting_url):
        """Return the version of the API supported by the given server.

        This method will cache the result.

        Args:
            hosting_url (unicode):
                The URL of the GitLab server.

        Returns:
            unicode:
            The version of the API as a string.

            It is returned as a string because
            :py:func:`djblets.cache.backend.cache_memoize` does not work on
            integer results.
        """
        headers = {}

        if self.account.data and 'private_token' in self.account.data:
            headers[b'PRIVATE-TOKEN'] = decrypt_password(
                self.account.data['private_token'])

        return cache_memoize(
            'gitlab-api-version:%s' % hosting_url,
            expiration=3600,
            lookup_callable=lambda: self._try_api_versions(
                hosting_url,
                headers=headers,
                path='/projects?per_page=1',
            )[0])
Example #13
    def get_text(self):
        """Return the file contents as a string.

        This will fetch the file and then cache it for future renders.
        """
        return cache_memoize('text-attachment-%d-string' % self.obj.pk,
                             self._get_text_uncached)
Example #14
    def test_cache_memoize(self):
        """Testing cache_memoize"""
        cacheKey = 'abc123'
        testStr = 'Test 123'

        def cacheFunc(cacheCalled=[]):
            self.assertTrue(not cacheCalled)
            cacheCalled.append(True)
            return testStr

        result = cache_memoize(cacheKey, cacheFunc)
        self.assertEqual(result, testStr)

        # Call a second time. We should only call cacheFunc once.
        result = cache_memoize(cacheKey, cacheFunc)
        self.assertEqual(result, testStr)
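
This is the baseline contract the rest of the page builds on: the callable runs on a cache miss, its return value is stored under the namespaced key, and subsequent calls are pure cache reads. The minimal call shape (assumes a configured Django cache):

    from djblets.cache.backend import cache_memoize

    def expensive_lookup():
        return 'slow-to-compute value'  # hypothetical work

    value = cache_memoize('my-unique-key', expensive_lookup)

    # A second call with the same key skips expensive_lookup() entirely.
    value_again = cache_memoize('my-unique-key', expensive_lookup)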
Example #15
    def get_commits(self, start=None):
        """Returns a list of commits.

        This is paginated via the 'start' parameter. Any exceptions are
        expected to be handled by the caller.
        """
        hosting_service = self.hosting_service

        cache_key = make_cache_key('repository-commits:%s:%s' % (self.pk, start))
        if hosting_service:
            commits_callable = lambda: hosting_service.get_commits(self, start)
        else:
            commits_callable = lambda: self.get_scmtool().get_commits(start)

        # We cache both the entire list for 'start', as well as each individual
        # commit. This allows us to reduce API load when people are looking at
        # the "new review request" page more frequently than they're pushing
        # code, and will usually save 1 API request when they go to actually
        # create a new review request.
        commits = cache_memoize(cache_key, commits_callable)

        for commit in commits:
            cache.set(self.get_commit_cache_key(commit.id),
                      commit, self.COMMITS_CACHE_PERIOD)

        return commits
Example #16
    def get_branches(self):
        """Return a list of all branches on the repository.

        This will fetch a list of all known branches for use in the API and
        New Review Request page.

        Returns:
            list of reviewboard.scmtools.core.Branch:
            The list of branches in the repository. One (and only one) will
            be marked as the default branch.

        Raises:
            reviewboard.hostingsvcs.errors.HostingServiceError:
                The hosting service backing the repository encountered an
                error.

            reviewboard.scmtools.errors.SCMError:
                The repository tool encountered an error.

            NotImplementedError:
                Branch retrieval is not available for this type of repository.
        """
        hosting_service = self.hosting_service

        cache_key = make_cache_key('repository-branches:%s' % self.pk)

        if hosting_service:
            branches_callable = lambda: hosting_service.get_branches(self)
        else:
            branches_callable = self.get_scmtool().get_branches

        return cache_memoize(cache_key, branches_callable,
                             self.BRANCHES_CACHE_PERIOD)
Example #17
    def test_cache_memoize_large_files_load_uncompressed(self):
        """Testing cache_memoize with large files without compression and
        loading data
        """
        cache_key = 'abc123'

        # This takes into account the size of the pickle data, and will
        # get us to exactly 2 chunks of data in cache.
        data, pickled_data = self._build_test_chunk_data(num_chunks=2)

        cache.set(make_cache_key(cache_key), '2')
        cache.set(make_cache_key('%s-0' % cache_key),
                  [pickled_data[:CACHE_CHUNK_SIZE]])
        cache.set(make_cache_key('%s-1' % cache_key),
                  [pickled_data[CACHE_CHUNK_SIZE:]])

        def cache_func():
            return ''

        self.spy_on(cache_func, call_original=True)

        result = cache_memoize(cache_key, cache_func, large_data=True,
                               compress_large_data=False)
        self.assertEqual(result, data)
        self.assertFalse(cache_func.spy.called)
Example #18
def view_feed(request, url, template_name="feedview/feed-page.html",
              cache_expiration=DEFAULT_EXPIRATION, extra_context={}):
    """
    Renders an RSS or Atom feed using the given template. This will use
    a cached copy if available in order to reduce hits to the server.
    """
    def fetch_feed():
        import feedparser

        data = urlopen(url).read()

        parser = feedparser.parse(data)

        context = {
            'parser': parser,
        }
        context.update(extra_context)

        return render_to_string(template_name,
                                RequestContext(request, context))

    try:
        return HttpResponse(cache_memoize("feed-%s" % url, fetch_feed,
                            cache_expiration,
                            force_overwrite=('reload' in request.GET)))
    except (URLError, http_client.HTTPException) as e:
        context = {
            'error': e,
        }
        context.update(extra_context)

        return render_to_response(template_name,
                                  RequestContext(request, context))
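
force_overwrite is the one knob shown here that no other example on this page uses: when the page is requested with ?reload, the callable runs and the stored copy is replaced even on a cache hit. Reduced to the call shape (hypothetical fetch function; configured Django cache assumed):

    from djblets.cache.backend import cache_memoize

    def fetch_feed_html():
        return '<ul>...</ul>'  # hypothetical rendered feed

    html = cache_memoize('feed-https://example.com/feed/',
                         fetch_feed_html,
                         force_overwrite=True)  # refresh even on a hit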
Example #19
    def test_cache_memoize_large_files_uncompressed_off_by_one(self):
        """Testing cache_memoize with large files without compression and
        one byte larger than an even chunk size."""
        cache_key = 'abc123'

        # This takes into account the size of the pickle data, and will
        # get us to just barely 3 chunks of data in cache.
        data = self._build_test_chunk_data(num_chunks=2)[0] + 'x'
        pickled_data = pickle.dumps(data, protocol=0)

        def cache_func():
            return data

        self.spy_on(cache_func, call_original=True)

        result = cache_memoize(cache_key,
                               cache_func,
                               large_data=True,
                               compress_large_data=False)
        self.assertEqual(result, data)
        self.assertTrue(cache_func.spy.called)

        cache_key_0 = make_cache_key('%s-0' % cache_key)
        cache_key_1 = make_cache_key('%s-1' % cache_key)
        cache_key_2 = make_cache_key('%s-2' % cache_key)

        self.assertTrue(make_cache_key(cache_key) in cache)
        self.assertTrue(cache_key_0 in cache)
        self.assertTrue(cache_key_1 in cache)
        self.assertTrue(cache_key_2 in cache)
        self.assertFalse(make_cache_key('%s-3' % cache_key) in cache)

        # Verify the contents of the stored data.
        stored_data = b''.join(
            cache.get(cache_key_0) + cache.get(cache_key_1) +
            cache.get(cache_key_2))
        self.assertEqual(stored_data, pickled_data)

        # Try fetching the data we stored.
        cache_func.spy.reset_calls()

        result = cache_memoize(cache_key,
                               cache_func,
                               large_data=True,
                               compress_large_data=False)
        self.assertEqual(result, data)
        self.assertFalse(cache_func.spy.called)
Example #20
    def get_text_lines(self):
        """Return the file contents as syntax-highlighted lines.

        This will fetch the file, render it however appropriate for the review
        UI, and split it into reviewable lines. It will then cache it for
        future renders.
        """
        return cache_memoize("text-attachment-%d-lines" % self.obj.pk, lambda: list(self.generate_highlighted_text()))
Example #21
    def get_thumbnail(self):
        """Return the thumbnail of the text file as rendered as html."""
        # Caches the generated thumbnail to eliminate the need on each page
        # reload to:
        # 1) re-read the file attachment
        # 2) re-generate the html based on the data read
        return cache_memoize('file-attachment-thumbnail-%s-html-%s'
                             % (self.__class__.__name__, self.attachment.pk),
                             self._generate_thumbnail)
Example #22
    def get_text_lines(self):
        """Return the file contents as syntax-highlighted lines.

        This will fetch the file, render it however appropriate for the review
        UI, and split it into reviewable lines. It will then cache it for
        future renders.
        """
        return cache_memoize('text-attachment-%d-lines' % self.obj.pk,
                             lambda: list(self.generate_highlighted_text()))
Example #23
    def get_thumbnail(self):
        """Returns the thumbnail of the text file as rendered as html"""
        # Caches the generated thumbnail to eliminate the need on each page
        # reload to:
        # 1) re-read the file attachment
        # 2) re-generate the html based on the data read
        return cache_memoize('file-attachment-thumbnail-%s-html-%s'
                             % (self.__class__.__name__, self.attachment.pk),
                             self._generate_thumbnail)
Example #24
def get_user_team_ids(user):
    """Return a set of I Done This team IDs that the user belongs to.

    Retrieves the set of teams from the I Done This API and caches it to
    avoid excessive requests. Team membership is not expected to change
    frequently, but the cache can be manually deleted if necessary.

    Args:
        user (django.contrib.auth.models.User):
            The user whose cached team IDs should be retrieved.

    Returns:
        set:
        The user's team IDs, or ``None`` if they could not be retrieved.
    """
    def _get_user_team_ids_uncached():
        request = create_idonethis_request(request_path='teams',
                                           api_token=api_token)
        logging.debug('IDoneThis: Loading teams for user "%s", '
                      'request "%s %s"',
                      user.username,
                      request.get_method(),
                      request.get_full_url())

        try:
            teams_data = urlopen(request).read()
        except (HTTPError, URLError) as e:
            if isinstance(e, HTTPError):
                error_info = '%s, error data: %s' % (e, e.read())
            else:
                error_info = e.reason

            logging.error('IDoneThis: Failed to load teams for user "%s", '
                          'request "%s %s": %s',
                          user.username,
                          request.get_method(),
                          request.get_full_url(),
                          error_info)
            raise

        return set(t['hash_id'] for t in json.loads(teams_data))

    api_token = get_user_api_token(user)

    if not api_token:
        return None

    try:
        return set(cache_memoize(_make_user_team_ids_cache_key(user),
                                 _get_user_team_ids_uncached,
                                 expiration=TEAM_IDS_CACHE_EXPIRATION))
    except Exception as e:
        logging.error('IDoneThis: Failed to load teams for user "%s": %s',
                      user.username,
                      e)
        return None
Example #25
def get_user_team_ids(user):
    """Return a set of I Done This team IDs that the user belongs to.

    Retrieves the set of teams from the I Done This API and caches it to
    avoid excessive requests. Team membership is not expected to change
    frequently, but the cache can be manually deleted if necessary.

    Args:
        user (django.contrib.auth.models.User):
            The user whose cached team IDs should be retrieved.

    Returns:
        set:
        The user's team IDs, or ``None`` if they could not be retrieved.
    """
    def _get_user_team_ids_uncached():
        request = create_idonethis_request(request_path='teams',
                                           api_token=api_token)
        logging.debug(
            'IDoneThis: Loading teams for user "%s", '
            'request "%s %s"', user.username, request.get_method(),
            request.get_full_url())

        try:
            teams_data = urlopen(request).read()
        except (HTTPError, URLError) as e:
            if isinstance(e, HTTPError):
                error_info = '%s, error data: %s' % (e, e.read())
            else:
                error_info = e.reason

            logging.error(
                'IDoneThis: Failed to load teams for user "%s", '
                'request "%s %s": %s', user.username, request.get_method(),
                request.get_full_url(), error_info)
            raise

        try:
            return set(t['hash_id'] for t in json.loads(teams_data))
        except Exception as e:
            logging.error(
                'IDoneThis: Failed to parse teams for user "%s": '
                '%s, teams data: %s', user.username, e, teams_data)
            raise

    api_token = get_user_api_token(user)

    if not api_token:
        return None

    try:
        return cache_memoize(_make_user_team_ids_cache_key(user),
                             _get_user_team_ids_uncached,
                             expiration=TEAM_IDS_CACHE_EXPIRATION)
    except Exception:
        return None
Example #26
    def test_cache_memoize_large_files_uncompressed_off_by_one(self):
        """Testing cache_memoize with large files without compression and
        one byte larger than an even chunk size."""
        cache_key = 'abc123'

        # This takes into account the size of the pickle data, and will
        # get us to just barely 3 chunks of data in cache.
        data = self._build_test_chunk_data(num_chunks=2)[0] + 'x'
        pickled_data = pickle.dumps(data)

        def cache_func():
            return data

        self.spy_on(cache_func, call_original=True)

        result = cache_memoize(cache_key, cache_func, large_data=True,
                               compress_large_data=False)
        self.assertEqual(result, data)
        self.assertTrue(cache_func.spy.called)

        cache_key_0 = make_cache_key('%s-0' % cache_key)
        cache_key_1 = make_cache_key('%s-1' % cache_key)
        cache_key_2 = make_cache_key('%s-2' % cache_key)

        self.assertTrue(make_cache_key(cache_key) in cache)
        self.assertTrue(cache_key_0 in cache)
        self.assertTrue(cache_key_1 in cache)
        self.assertTrue(cache_key_2 in cache)
        self.assertFalse(make_cache_key('%s-3' % cache_key) in cache)

        # Verify the contents of the stored data.
        stored_data = b''.join(cache.get(cache_key_0) +
                               cache.get(cache_key_1) +
                               cache.get(cache_key_2))
        self.assertEqual(stored_data, pickled_data)

        # Try fetching the data we stored.
        cache_func.spy.reset_calls()

        result = cache_memoize(cache_key, cache_func, large_data=True,
                               compress_large_data=False)
        self.assertEqual(result, data)
        self.assertFalse(cache_func.spy.called)
Example #27
    def get_rendered_lines(self):
        """Returns the file contents as a render, based on the raw text.

        If a subclass sets ``can_render_text = True`` and implements
        ``generate_render``, then this will render the contents in some
        specialized form, cache it as a list of lines, and return it.
        """
        if self.can_render_text:
            return cache_memoize("text-attachment-%d-rendered" % self.obj.pk, lambda: list(self.generate_render()))
        else:
            return []
Example #28
    def get_branches(self):
        """Returns a list of branches."""
        hosting_service = self.hosting_service

        cache_key = make_cache_key("repository-branches:%s" % self.pk)
        if hosting_service:
            branches_callable = lambda: hosting_service.get_branches(self)
        else:
            branches_callable = self.get_scmtool().get_branches

        return cache_memoize(cache_key, branches_callable,
                             self.BRANCHES_CACHE_PERIOD)
Example #29
    def get_file_exists(self, path, revision, base_commit_id=None, request=None):
        """Returns whether or not a file exists in the repository.

        If the repository is backed by a hosting service, this will go
        through that. Otherwise, it will attempt to directly access the
        repository.

        The result of this call will be cached, making future lookups
        of this path and revision on this repository faster.
        """
        key = self._make_file_exists_cache_key(path, revision, base_commit_id)

        if cache.get(make_cache_key(key)) == "1":
            return True

        exists = self._get_file_exists_uncached(path, revision, base_commit_id, request)

        if exists:
            cache_memoize(key, lambda: "1")

        return exists
Example #30
    def get_rendered_lines(self):
        """Returns the file contents as a render, based on the raw text.

        If a subclass sets ``can_render_text = True`` and implements
        ``generate_render``, then this will render the contents in some
        specialized form, cache it as a list of lines, and return it.
        """
        if self.can_render_text:
            return cache_memoize('text-attachment-%d-rendered' % self.obj.pk,
                                 lambda: list(self.generate_render()))
        else:
            return []
Example #31
    def get_branches(self):
        """Returns a list of branches."""
        hosting_service = self.hosting_service

        cache_key = make_cache_key('repository-branches:%s' % self.pk)
        if hosting_service:
            branches_callable = lambda: hosting_service.get_branches(self)
        else:
            branches_callable = self.get_scmtool().get_branches

        return cache_memoize(cache_key, branches_callable,
                             self.BRANCHES_CACHE_PERIOD)
Example #32
    def get_file_exists(self,
                        path,
                        revision,
                        base_commit_id=None,
                        request=None):
        """Returns whether or not a file exists in the repository.

        If the repository is backed by a hosting service, this will go
        through that. Otherwise, it will attempt to directly access the
        repository.

        The result of this call will be cached, making future lookups
        of this path and revision on this repository faster.
        """
        if not isinstance(path, six.text_type):
            raise TypeError('"path" must be a Unicode string, not %s' %
                            type(path))

        if not isinstance(revision, six.text_type):
            raise TypeError('"revision" must be a Unicode string, not %s' %
                            type(revision))

        if (base_commit_id is not None
                and not isinstance(base_commit_id, six.text_type)):
            raise TypeError('"base_commit_id" must be a Unicode string, '
                            'not %s' % type(base_commit_id))

        key = self._make_file_exists_cache_key(path, revision, base_commit_id)

        if cache.get(make_cache_key(key)) == '1':
            return True

        exists = self._get_file_exists_uncached(path, revision, base_commit_id,
                                                request)

        if exists:
            cache_memoize(key, lambda: '1')

        return exists
Example #33
    def get_chunks(self, cache_key=None):
        """Return the chunks for the given diff information.

        If a cache key is provided and there are chunks already computed in the
        cache, they will be yielded. Otherwise, new chunks will be generated,
        stored in cache (given a cache key), and yielded.
        """
        if cache_key:
            chunks = cache_memoize(cache_key, lambda: list(self.get_chunks_uncached()), large_data=True)
        else:
            chunks = self.get_chunks_uncached()

        for chunk in chunks:
            yield chunk
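
The lambda: list(...) wrapper matters here: get_chunks_uncached() is presumably a generator, and a generator can be neither pickled nor replayed, so it is materialized into a list before it reaches the cache, then re-yielded to preserve the generator-style interface. The pattern in isolation (hypothetical producer; configured Django cache assumed):

    from djblets.cache.backend import cache_memoize

    def produce_chunks():
        # Hypothetical generator; generators themselves cannot be cached.
        for i in range(3):
            yield {'index': i}

    def get_chunks(cache_key):
        # Materialize once so the cached value is a picklable list.
        chunks = cache_memoize(cache_key,
                               lambda: list(produce_chunks()),
                               large_data=True)

        for chunk in chunks:
            yield chunk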
Example #34
    def get_bug_info(self, repository, bug_id):
        """Get the information for the specified bug.

        This should return a dictionary with 'summary', 'description', and
        'status' keys.

        This is cached for 60 seconds to reduce the number of queries to the
        bug trackers and make things seem fast after the first infobox load,
        but is still a short enough time to give relatively fresh data.
        """
        return cache_memoize(self.make_bug_cache_key(repository, bug_id),
                             lambda: self.get_bug_info_uncached(repository,
                                                                bug_id),
                             expiration=60)
Example #35
    def get_bug_info(self, repository, bug_id):
        """Get the information for the specified bug.

        This should return a dictionary with 'summary', 'description', and
        'status' keys.

        This is cached for 60 seconds to reduce the number of queries to the
        bug trackers and make things seem fast after the first infobox load,
        but is still a short enough time to give relatively fresh data.
        """
        return cache_memoize(
            self.make_bug_cache_key(repository, bug_id),
            lambda: self.get_bug_info_uncached(repository, bug_id),
            expiration=60)
Example #36
    def render(self, request):
        """Render the widget.

        This will render the HTML for a widget. It takes care of generating
        and caching the data, depending on the widget's needs.
        """
        if self.has_data and self.data is None:
            if self.cache_data:
                self.data = cache_memoize(self.generate_cache_key(request),
                                          lambda: self.generate_data(request))
            else:
                self.data = self.generate_data(request)

        return super(Widget, self).render(request)
Example #37
    def test_cache_memoize_large_files_compressed(self):
        """Testing cache_memoize with large files with compression"""
        cache_key = 'abc123'

        data, pickled_data = self._build_test_chunk_data(num_chunks=2)

        def cache_func():
            return data

        self.spy_on(cache_func, call_original=True)

        result = cache_memoize(cache_key,
                               cache_func,
                               large_data=True,
                               compress_large_data=True)
        self.assertTrue(cache_func.spy.called)

        cache_key_0 = make_cache_key('%s-0' % cache_key)

        self.assertTrue(make_cache_key(cache_key) in cache)
        self.assertTrue(cache_key_0 in cache)
        self.assertFalse(make_cache_key('%s-1' % cache_key) in cache)
        self.assertFalse(make_cache_key('%s-2' % cache_key) in cache)

        # Verify the contents of the stored data.
        stored_data = cache.get(cache_key_0)[0]
        self.assertEqual(stored_data, zlib.compress(pickled_data))

        # Try fetching the data we stored.
        cache_func.spy.reset_calls()
        result = cache_memoize(cache_key,
                               cache_func,
                               large_data=True,
                               compress_large_data=True)
        self.assertEqual(result, data)
        self.assertFalse(cache_func.spy.called)
Example #38
def _fetch_dmarc_record(hostname, use_cache, cache_expiration):
    """Fetch a DMARC record from DNS, optionally caching it.

    This will query DNS for the DMARC record for a given hostname, returning
    the string contents of the record.

    The contents can be cached, preventing the need for subsequent DNS queries.

    This is used internally by :py:func:`get_dmarc_record`.

    Args:
        hostname (unicode):
            The hostname to fetch the record from.

        use_cache (bool, optional):
            Whether to use the cache for looking up and storing record data.

        cache_expiration (int, optional):
            The expiration time for cached data.

    Returns:
        DmarcRecord:
        The DMARC record. If it could not be found, or DNS lookup failed,
        ``None`` will be returned instead.
    """
    def _fetch_record():
        try:
            return dns_resolver.query('_dmarc.%s' % hostname,
                                      'TXT')[0].to_text()
        except (IndexError, dns_resolver.NXDOMAIN, dns_resolver.NoAnswer,
                dns_resolver.NoNameservers):
            raise ValueError

    try:
        if use_cache:
            record_str = cache_memoize(
                'dmarc-record-%s' % hostname,
                lambda: _fetch_record(),
                expiration=cache_expiration)
        else:
            record_str = _fetch_record()
    except ValueError:
        record_str = None

    if record_str:
        return DmarcRecord.parse(hostname, record_str)

    return None
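
The ValueError dance is how a failed lookup avoids being cached: when the callable raises, cache_memoize stores nothing (which this code relies on), so the next call retries DNS instead of pinning the failure for cache_expiration seconds. The skeleton of that pattern (hypothetical resolver; configured Django cache assumed):

    from djblets.cache.backend import cache_memoize

    def fetch_record():
        record = None  # hypothetical: a DNS lookup that found nothing
        if record is None:
            # Raising means nothing is stored, so failures are retried
            # on the next call rather than cached until expiration.
            raise ValueError
        return record

    try:
        record_str = cache_memoize('dmarc-record-example.com',
                                   fetch_record,
                                   expiration=300)
    except ValueError:
        record_str = None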
Example #39
    def get_comment_thumbnail(self, comment):
        """Get the "thumbnail" for a comment.

        This extracts the relevant paragraph that was commented on and returns
        it as HTML to be included in the list of reviews.
        """
        try:
            child_id = int(comment.extra_data['child_id'])
        except (KeyError, ValueError):
            # This may be a comment from before we had review UIs. Or,
            # corrupted data. Either way, we don't display anything.
            return None

        return cache_memoize(
            'markdown-attachment-comment-thumbnail-%d-%d' %
            (self.obj.pk, child_id), lambda: self._get_thumbnail(child_id))
Example #40
    def get_chunks(self, cache_key=None):
        """Return the chunks for the given diff information.

        If a cache key is provided and there are chunks already computed in the
        cache, they will be yielded. Otherwise, new chunks will be generated,
        stored in cache (given a cache key), and yielded.
        """
        if cache_key:
            chunks = cache_memoize(cache_key,
                                   lambda: list(self.get_chunks_uncached()),
                                   large_data=True)
        else:
            chunks = self.get_chunks_uncached()

        for chunk in chunks:
            yield chunk
Example #41
    def render_to_string(self):
        """Returns the diff as a string.

        The resulting diff may optimistically be pulled from the cache, if
        not rendering a custom line range. This makes diff rendering very
        quick.

        If operating with a cache, and the diff doesn't exist in the cache,
        it will be stored after render.
        """
        cache = self.allow_caching and not self.lines_of_context

        if cache:
            return cache_memoize(self.make_cache_key(), self.render_to_string_uncached)
        else:
            return self.render_to_string_uncached()
Example #42
def cached_javascript_catalog(request, domain='djangojs', packages=None):
    """A cached version of javascript_catalog."""
    global locale_serials

    package_str = '_'.join(packages)
    try:
        serial = locale_serials[package_str]
    except KeyError:
        serial = generate_locale_serial(packages)
        locale_serials[package_str] = serial

    return cache_memoize(
        'jsi18n-%s-%s-%s-%d' % (domain, package_str, get_language(), serial),
        lambda: javascript_catalog(request, domain, packages),
        large_data=True,
        compress_large_data=True)
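
A pairing worth noting: the rendered catalog is bulky but repetitive text, so it is cached with large_data=True and compress_large_data=True, which (per the tests earlier on this page) pickles, zlib-compresses, and chunks the value. The serial baked into the key doubles as version-based invalidation: when the locale files change, the serial changes, and the stale entry is simply never read again. Call shape with a hypothetical renderer:

    from djblets.cache.backend import cache_memoize

    def render_catalog():
        return '{"msg": "hello"}' * 1000  # bulky, repetitive stand-in

    serial = 7  # hypothetical version stamp; changing it invalidates

    catalog = cache_memoize('jsi18n-djangojs-en-%d' % serial,
                            render_catalog,
                            large_data=True,
                            compress_large_data=True)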
Example #43
    def get_comment_thumbnail(self, comment):
        """Get the "thumbnail" for a comment.

        This extracts the relevant paragraph that was commented on and returns
        it as HTML to be included in the list of reviews.
        """
        try:
            child_id = int(comment.extra_data['child_id'])
        except (KeyError, ValueError):
            # This may be a comment from before we had review UIs. Or,
            # corrupted data. Either way, we don't display anything.
            return None

        return cache_memoize(
            'markdown-attachment-comment-thumbnail-%d-%d' % (self.obj.pk,
                                                             child_id),
            lambda: self._get_thumbnail(child_id))
Example #44
    def get_thumbnail(self):
        """Return the thumbnail of the text file as rendered as html.

        The content will be generated and then cached for future requests.

        Returns:
            django.utils.safestring.SafeText:
            The resulting HTML-safe thumbnail content.
        """
        # Caches the generated thumbnail to eliminate the need on each page
        # reload to:
        # 1) re-read the file attachment
        # 2) re-generate the html based on the data read
        return mark_safe(
            cache_memoize('file-attachment-thumbnail-%s-html-%s'
                          % (self.__class__.__name__, self.attachment.pk),
                          self._generate_thumbnail))
Example #45
    def render_to_string(self):
        """Returns the diff as a string.

        The resulting diff may optimistically be pulled from the cache, if
        not rendering a custom line range. This makes diff rendering very
        quick.

        If operating with a cache, and the diff doesn't exist in the cache,
        it will be stored after render.
        """
        cache = self.allow_caching and not self.lines_of_context

        if cache:
            return cache_memoize(self.make_cache_key(),
                                 self.render_to_string_uncached)
        else:
            return self.render_to_string_uncached()
Example #46
    def render(self, request):
        """Renders a widget.

        This will render the HTML for a widget. It takes care of generating
        and caching the data, depending on the widget's needs.
        """
        if self.has_data and self.data is None:
            if self.cache_data:
                self.data = cache_memoize(self.generate_cache_key(request),
                                          lambda: self.generate_data(request))
            else:
                self.data = self.generate_data(request)

        return render_to_string('admin/admin_widget.html',
                                RequestContext(request, {
                                    'widget': self,
                                }))
Example #47
    def get_chunks(self):
        """Returns the chunks for the given diff information.

        If the file is binary or deleted, or if the file has moved with no
        additional changes, then an empty list of chunks will be returned.

        If there are chunks already computed in the cache, they will be
        returned. Otherwise, new chunks will be generated, stored in cache,
        and returned.
        """
        if (self.filediff.binary or self.filediff.deleted
                or self.filediff.source_revision == ''):
            return []

        return cache_memoize(self.make_cache_key(),
                             lambda: list(self._get_chunks_uncached()),
                             large_data=True)
Example #48
    def get_extra_context(self, request):
        """Return extra context for the template.

        Args:
            request (django.http.HttpRequest):
                The HTTP request from the client.

        Returns:
            dict:
            Extra context to pass to the template.
        """
        extra_context = cache_memoize(
            'admin-widget-repos-data',
            lambda: self._get_repositories_data(request))
        extra_context['add_repo_docs_url'] = \
            '%sadmin/configuration/repositories/' % get_manual_url()

        return extra_context
Example #49
    def get_chunks(self):
        """Returns the chunks for the given diff information.

        If the file is binary or deleted, or if the file has moved with no
        additional changes, then an empty list of chunks will be returned.

        If there are chunks already computed in the cache, they will be
        returned. Otherwise, new chunks will be generated, stored in cache,
        and returned.
        """
        if (self.filediff.binary or
                self.filediff.deleted or
                self.filediff.source_revision == ''):
            return []

        return cache_memoize(self.make_cache_key(),
                             lambda: list(self._get_chunks_uncached()),
                             large_data=True)
Example #50
    def get_comment_thumbnail(self, comment):
        """Generates and returns a thumbnail representing this comment.

        This will find the appropriate lines the comment applies to and
        return it as HTML suited for rendering in reviews.
        """
        try:
            begin_line_num = int(comment.extra_data["beginLineNum"])
            end_line_num = int(comment.extra_data["endLineNum"])
            view_mode = comment.extra_data["viewMode"]
        except (KeyError, ValueError):
            # This may be a comment from before we had review UIs. Or,
            # corrupted data. Either way, don't display anything.
            return None

        return cache_memoize(
            "text-review-ui-comment-thumbnail-%s-%s" % (self.obj.pk, comment.pk),
            lambda: self.render_comment_thumbnail(comment, begin_line_num, end_line_num, view_mode),
        )
Example #51
    def get_all_consent(self, user):
        """Return all consent decisions made by a given user.

        It's important to note that a user may not have made a decision on
        consent for a given registered requirement, in which case the results
        will not include an entry for that requirement.

        Args:
            user (django.contrib.auth.models.User):
                The user to return all consent information for.

        Returns:
            dict:
            A dictionary of
            :py:class:`~djblets.privacy.consent.base.BaseConsentRequirement`
            IDs to :py:class:`~djblets.privacy.consent.base.Consent` values.
        """
        return cache_memoize(self._get_user_cache_key(user),
                             lambda: self.get_all_consent_uncached(user))
Example #52
    def get_file(self, path, revision, base_commit_id=None, request=None):
        """Returns a file from the repository.

        This will attempt to retrieve the file from the repository. If the
        repository is backed by a hosting service, it will go through that.
        Otherwise, it will attempt to directly access the repository.
        """
        # We wrap the result of get_file in a list and then return the first
        # element after getting the result from the cache. This prevents the
        # cache backend from converting to unicode, since we're no longer
        # passing in a string and the cache backend doesn't recursively look
        # through the list in order to convert the elements inside.
        #
        # Basically, this fixes the massive regressions introduced by the
        # Django unicode changes.
        return cache_memoize(
            self._make_file_cache_key(path, revision, base_commit_id),
            lambda: [self._get_file_uncached(path, revision, base_commit_id,
                                             request)],
            large_data=True)[0]
Example #53
    def test_cache_memoize_large_files_load_compressed(self):
        """Testing cache_memoize with large files with compression and
        loading cached data
        """
        cache_key = 'abc123'

        data, pickled_data = self._build_test_chunk_data(num_chunks=2)
        stored_data = zlib.compress(pickled_data)
        self.assertTrue(len(stored_data) < CACHE_CHUNK_SIZE)

        cache.set(make_cache_key(cache_key), '1')
        cache.set(make_cache_key('%s-0' % cache_key), [stored_data])

        def cache_func():
            return ''

        self.spy_on(cache_func, call_original=True)

        result = cache_memoize(cache_key, cache_func, large_data=True,
                               compress_large_data=True)
        self.assertEqual(result, data)
        self.assertFalse(cache_func.spy.called)
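
The manual seeding in these load tests documents the stored layout for large data: the main key holds the chunk count as a string, and each chunk lives under "<key>-<n>" as a one-element list of bytes. That is what lets a test drop a pre-built, pre-compressed entry straight into the cache. Seeding a one-chunk compressed entry by hand (configured Django cache assumed):

    import pickle
    import zlib

    from django.core.cache import cache
    from djblets.cache.backend import make_cache_key

    data = 'x' * 10000  # hypothetical payload that compresses to one chunk
    stored = zlib.compress(pickle.dumps(data, protocol=0))

    cache.set(make_cache_key('abc123'), '1')         # chunk count, as a string
    cache.set(make_cache_key('abc123-0'), [stored])  # chunk 0, boxed in a list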