Code example #1
    def get_file_exists(self,
                        path,
                        revision,
                        base_commit_id=None,
                        request=None):
        """Returns whether or not a file exists in the repository.

        If the repository is backed by a hosting service, this will go
        through that. Otherwise, it will attempt to directly access the
        repository.

        The result of this call will be cached, making future lookups
        of this path and revision on this repository faster.
        """
        key = self._make_file_exists_cache_key(path, revision, base_commit_id)

        if cache.get(make_cache_key(key)) == '1':
            return True

        exists = self._get_file_exists_uncached(path, revision, base_commit_id,
                                                request)

        if exists:
            cache_memoize(key, lambda: '1')

        return exists
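
Taken together, the call sites on this page pin down the rough shape of djblets' cache_memoize: build a site-scoped key, return the cached value on a hit, otherwise invoke the callable and store its result. The following is only a hedged sketch reconstructed from those calls, not the real implementation; the large_data chunking and compression paths are omitted, and the DEFAULT_EXPIRATION value is a placeholder.

from django.contrib.sites.models import Site
from django.core.cache import cache

DEFAULT_EXPIRATION = 60 * 60 * 24 * 30  # placeholder; assumed, not djblets' value


def make_cache_key(key):
    # Assumed behavior, matching the '<site.domain>:<key>' format the
    # large-file test below asserts on.
    site = Site.objects.get_current()
    return '%s:%s' % (site.domain, key)


def cache_memoize(key, lookup_callable, expiration=DEFAULT_EXPIRATION,
                  force_overwrite=False, large_data=False,
                  compress_large_data=True):
    """Sketch: memoize lookup_callable() under a site-scoped cache key.

    large_data/compress_large_data are accepted for signature parity, but
    the chunking/compression path is omitted from this sketch.
    """
    full_key = make_cache_key(key)

    if not force_overwrite:
        data = cache.get(full_key)

        if data is not None:
            return data

    data = lookup_callable()
    cache.set(full_key, data, expiration)
    return data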
Code example #2
File: tests.py Project: zenmurugan/djblets
    def test_cache_memoize_large_files(self):
        """Testing cache_memoize with large files"""
        cacheKey = "abc123"

        # This takes into account the size of the pickle data, and will
        # get us to exactly 2 chunks of data in cache.
        data = 'x' * (CACHE_CHUNK_SIZE * 2 - 8)

        def cacheFunc(cacheCalled=[]):
            self.assertTrue(not cacheCalled)
            cacheCalled.append(True)
            return data

        result = cache_memoize(cacheKey,
                               cacheFunc,
                               large_data=True,
                               compress_large_data=False)
        self.assertEqual(result, data)

        site = Site.objects.get_current()
        full_key = '%s:%s' % (site.domain, cacheKey)
        self.assertTrue(cache.has_key(full_key))
        self.assertTrue(cache.has_key('%s-0' % full_key))
        self.assertTrue(cache.has_key('%s-1' % full_key))
        self.assertFalse(cache.has_key('%s-2' % full_key))

        result = cache_memoize(cacheKey,
                               cacheFunc,
                               large_data=True,
                               compress_large_data=False)
        self.assertEqual(result, data)
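
The key assertions above also reveal the presumed on-cache layout for large_data: the payload is pickled, sliced into CACHE_CHUNK_SIZE pieces, and each piece is stored under a numbered '-0', '-1', ... suffix of the main key. A minimal sketch of that write path, with store_in_chunks as an illustrative helper name rather than djblets API:

import pickle


def store_in_chunks(cache, full_key, data, chunk_size):
    # Pickle once, then slice the byte stream into fixed-size chunks;
    # 'x' * (CACHE_CHUNK_SIZE * 2 - 8) above lands on exactly two chunks
    # because it accounts for the pickle overhead.
    payload = pickle.dumps(data)
    chunks = [payload[i:i + chunk_size]
              for i in range(0, len(payload), chunk_size)]

    # The main key marks that chunked data exists; the test above checks
    # for it plus the '-0' and '-1' chunk keys.
    cache.set(full_key, '%d' % len(chunks))

    for i, chunk in enumerate(chunks):
        cache.set('%s-%d' % (full_key, i), chunk)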
Code example #3
File: tests.py Project: dekoza/djblets
    def test_cache_memoize_large_files(self):
        """Testing cache_memoize with large files"""
        cacheKey = "abc123"

        # This takes into account the size of the pickle data, and will
        # get us to exactly 2 chunks of data in cache.
        data = 'x' * (CACHE_CHUNK_SIZE * 2 - 8)

        def cacheFunc(cacheCalled=[]):
            self.assertTrue(not cacheCalled)
            cacheCalled.append(True)
            return data

        result = cache_memoize(cacheKey, cacheFunc, large_data=True,
                               compress_large_data=False)
        self.assertEqual(result, data)

        site = Site.objects.get_current()
        full_key = '%s:%s' % (site.domain, cacheKey)
        self.assertTrue(cache.has_key(full_key))
        self.assertTrue(cache.has_key('%s-0' % full_key))
        self.assertTrue(cache.has_key('%s-1' % full_key))
        self.assertFalse(cache.has_key('%s-2' % full_key))

        result = cache_memoize(cacheKey, cacheFunc, large_data=True,
                               compress_large_data=False)
        self.assertEqual(result, data)
Code example #4
    def get_commits(self, start=None):
        """Returns a list of commits.

        This is paginated via the 'start' parameter. Any exceptions are
        expected to be handled by the caller.
        """
        hosting_service = self.hosting_service

        cache_key = make_cache_key('repository-commits:%s:%s' %
                                   (self.pk, start))
        if hosting_service:
            commits_callable = lambda: hosting_service.get_commits(self, start)
        else:
            commits_callable = lambda: self.get_scmtool().get_commits(start)

        # We cache both the entire list for 'start', as well as each individual
        # commit. This allows us to reduce API load when people are looking at
        # the "new review request" page more frequently than they're pushing
        # code, and will usually save 1 API request when they go to actually
        # create a new review request.
        commits = cache_memoize(cache_key, commits_callable)

        for commit in commits:
            cache.set(self.get_commit_cache_key(commit.id), commit,
                      self.COMMITS_CACHE_PERIOD)

        return commits
Code example #5
File: views.py Project: BobPyron/SideBySideDiff
def build_diff_fragment(request, file, chunkindex, highlighting, collapseall,
                        context,
                        template_name='diffviewer/diff_file_fragment.html'):
    key = template_name + '-'

    if file['force_interdiff']:
        if file['interfilediff']:
            key += 'interdiff-%s-%s' % (file['filediff'].id,
                                        file['interfilediff'].id)
        else:
            key += 'interdiff-%s-none' % file['filediff'].id
    else:
        key += str(file['filediff'].id)

    if chunkindex:
        chunkindex = int(chunkindex)
        if chunkindex < 0 or chunkindex >= len(file['chunks']):
            raise UserVisibleError(_(u"Invalid chunk index %s specified.") % \
                                   chunkindex)

        file['chunks'] = [file['chunks'][chunkindex]]
        key += '-chunk-%s' % chunkindex

    if collapseall:
        key += '-collapsed'
        context['collapseall'] = True

    if highlighting:
        key += '-highlighting'

    context['file'] = file

    return cache_memoize(key,
        lambda: render_to_string(template_name,
                                 RequestContext(request, context)))
Code example #6
File: views.py Project: NN---/reviewboard
def build_diff_fragment(
    request, file, chunkindex, highlighting, collapseall, context, template_name="diffviewer/diff_file_fragment.html"
):
    key = "%s-%s-%s-" % (template_name, file["index"], file["filediff"].diffset.revision)

    if file["force_interdiff"]:
        if file["interfilediff"]:
            key += "interdiff-%s-%s" % (file["filediff"].id, file["interfilediff"].id)
        else:
            key += "interdiff-%s-none" % file["filediff"].id
    else:
        key += str(file["filediff"].id)

    if chunkindex:
        chunkindex = int(chunkindex)
        if chunkindex < 0 or chunkindex >= len(file["chunks"]):
            raise UserVisibleError(_(u"Invalid chunk index %s specified.") % chunkindex)

        file["chunks"] = [file["chunks"][chunkindex]]
        key += "-chunk-%s" % chunkindex

    if collapseall:
        key += "-collapsed"
        context["collapseall"] = True

    if highlighting:
        key += "-highlighting"

    key += "-%s" % settings.AJAX_SERIAL

    context["file"] = file

    return cache_memoize(key, lambda: render_to_string(template_name, RequestContext(request, context)))
Code example #7
File: tests.py Project: dekoza/djblets
    def test_cache_memoize(self):
        """Testing cache_memoize"""
        cacheKey = "abc123"
        testStr = "Test 123"

        def cacheFunc(cacheCalled=[]):
            self.assertTrue(not cacheCalled)
            cacheCalled.append(True)
            return testStr

        result = cache_memoize(cacheKey, cacheFunc)
        self.assertEqual(result, testStr)

        # Call a second time. We should only call cacheFunc once.
        result = cache_memoize(cacheKey, cacheFunc)
        self.assertEqual(result, testStr)
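
The cacheCalled=[] idiom above is what makes the "only call cacheFunc once" check work: a mutable default argument is created once, at definition time, and shared by every call, so the append from the first invocation is still visible if cache_memoize ever calls the function again. A standalone illustration:

def fetch(called=[]):
    # The default list is created once and shared across calls,
    # so it records whether fetch() has run before.
    assert not called, 'fetch() was invoked more than once'
    called.append(True)
    return 'value'

fetch()    # first call succeeds
# fetch()  # a second call would raise AssertionError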
Code example #8
def populate_diff_chunks(files, enable_syntax_highlighting=True):
    """Populates a list of diff files with chunk data.

    This accepts a list of files (generated by get_diff_files) and generates
    diff chunk data for each file in the list. The chunk data is stored in
    the file state.
    """
    key_prefix = "diff-sidebyside-"

    if enable_syntax_highlighting:
        key_prefix += "hl-"

    for file in files:
        filediff = file['filediff']
        interfilediff = file['interfilediff']
        force_interdiff = file['force_interdiff']
        chunks = []

        # If the file is binary or deleted, don't get chunks. Also don't
        # get chunks if there is no source_revision, which occurs if a
        # file has moved and has no changes.
        if (not filediff.binary and not filediff.deleted
                and filediff.source_revision != ''):
            key = key_prefix

            if not force_interdiff:
                key += str(filediff.pk)
            elif interfilediff:
                key += "interdiff-%s-%s" % (filediff.pk, interfilediff.pk)
            else:
                key += "interdiff-%s-none" % filediff.pk

            chunks = cache_memoize(
                key,
                lambda: list(
                    get_chunks(filediff.diffset, filediff, interfilediff,
                               force_interdiff, enable_syntax_highlighting)),
                large_data=True)

        file.update({
            'chunks': chunks,
            'num_chunks': len(chunks),
            'changed_chunk_indexes': [],
            'whitespace_only': True,
        })

        for j, chunk in enumerate(chunks):
            chunk['index'] = j

            if chunk['change'] != 'equal':
                file['changed_chunk_indexes'].append(j)
                meta = chunk.get('meta', {})

                if not meta.get('whitespace_chunk', False):
                    file['whitespace_only'] = False

        file.update({
            'num_changes': len(file['changed_chunk_indexes']),
            'chunks_loaded': True,
        })
Code example #9
File: models.py Project: cwest/reviewboard
    def get_commits(self, start=None):
        """Returns a list of commits.

        This is paginated via the 'start' parameter. Any exceptions are
        expected to be handled by the caller.
        """
        hosting_service = self.hosting_service

        cache_key = make_cache_key('repository-commits:%s:%s' % (self.pk, start))
        if hosting_service:
            commits_callable = lambda: hosting_service.get_commits(self, start)
        else:
            commits_callable = lambda: self.get_scmtool().get_commits(start)

        # We cache both the entire list for 'start', as well as each individual
        # commit. This allows us to reduce API load when people are looking at
        # the "new review request" page more frequently than they're pushing
        # code, and will usually save 1 API request when they go to actually
        # create a new review request.
        commits = cache_memoize(cache_key, commits_callable)

        for commit in commits:
            cache.set(self.get_commit_cache_key(commit.id),
                      commit, self.COMMITS_CACHE_PERIOD)

        return commits
Code example #10
File: views.py Project: atempcode1/reviewboard
def get_media_urls():
    def scan_media_files():
        media_prefix = settings.MEDIA_URL

        if not media_prefix.startswith("http"):
            site = Site.objects.get_current()
            media_prefix = "%s://%s%s" % (domain_method, site.domain, media_prefix)

        paths = []

        for media_dir in settings.MEDIA_SERIAL_DIRS:
            media_path = os.path.join(settings.MEDIA_ROOT, media_dir)

            for root, dirs, files in os.walk(media_path):
                for name in files:
                    if not name.startswith(".") and os.path.splitext(name)[1] in VALID_EXTENSIONS:
                        path = os.path.relpath(os.path.join(root, name), settings.MEDIA_ROOT)
                        paths.append(
                            {
                                "url": "%s%s?%s" % (media_prefix, path, settings.MEDIA_SERIAL),
                                "matchQuery": {"hasAll": str(settings.MEDIA_SERIAL)},
                            }
                        )

        return paths

    siteconfig = SiteConfiguration.objects.get_current()
    domain_method = siteconfig.get("site_domain_method")

    key = "%s-media-serial-urls-%s" % (domain_method, settings.MEDIA_SERIAL)

    return cache_memoize(key, scan_media_files)
Code example #11
File: diffutils.py Project: Catherine1/reviewboard
def populate_diff_chunks(files, enable_syntax_highlighting=True):
    """Populates a list of diff files with chunk data.

    This accepts a list of files (generated by get_diff_files) and generates
    diff chunk data for each file in the list. The chunk data is stored in
    the file state.
    """
    key_prefix = "diff-sidebyside-"

    if enable_syntax_highlighting:
        key_prefix += "hl-"

    for file in files:
        filediff = file['filediff']
        interfilediff = file['interfilediff']
        force_interdiff = file['force_interdiff']
        chunks = []

        # If the file is binary or deleted, don't get chunks. Also don't
        # get chunks if there is no source_revision, which occurs if a
        # file has moved and has no changes.
        if (not filediff.binary and not filediff.deleted and
            filediff.source_revision != ''):
            key = key_prefix

            if not force_interdiff:
                key += str(filediff.pk)
            elif interfilediff:
                key += "interdiff-%s-%s" % (filediff.pk, interfilediff.pk)
            else:
                key += "interdiff-%s-none" % filediff.pk

            chunks = cache_memoize(
                key,
                lambda: list(get_chunks(filediff.diffset,
                                        filediff, interfilediff,
                                        force_interdiff,
                                        enable_syntax_highlighting)),
                large_data=True)

        file.update({
            'chunks': chunks,
            'changed_chunk_indexes': [],
            'whitespace_only': True,
        })

        for j, chunk in enumerate(chunks):
            chunk['index'] = j

            if chunk['change'] != 'equal':
                file['changed_chunk_indexes'].append(j)
                meta = chunk.get('meta', {})

                if not meta.get('whitespace_chunk', False):
                    file['whitespace_only'] = False

        file.update({
            'num_changes': len(file['changed_chunk_indexes']),
            'chunks_loaded': True,
        })
Code example #12
File: tests.py Project: zenmurugan/djblets
    def test_cache_memoize(self):
        """Testing cache_memoize"""
        cacheKey = "abc123"
        testStr = "Test 123"

        def cacheFunc(cacheCalled=[]):
            self.assertTrue(not cacheCalled)
            cacheCalled.append(True)
            return testStr

        result = cache_memoize(cacheKey, cacheFunc)
        self.assertEqual(result, testStr)

        # Call a second time. We should only call cacheFunc once.
        result = cache_memoize(cacheKey, cacheFunc)
        self.assertEqual(result, testStr)
Code example #13
File: tests.py Project: Kamlani/djblets
    def testCacheMemoize(self):
        """Testing cache_memoize"""
        cacheKey = "abc123"
        testStr = "Test 123"

        def cacheFunc(cacheCalled=[]):
            # The shared default list persists across calls, so the
            # assertion below fails if cacheFunc() is ever called twice.
            self.assert_(not cacheCalled)
            cacheCalled.append(True)
            return testStr

        result = cache_memoize(cacheKey, cacheFunc)
        self.assertEqual(result, testStr)

        # Call a second time. We should only call cacheFunc once.
        result = cache_memoize(cacheKey, cacheFunc)
        self.assertEqual(result, testStr)
Code example #14
    def testCacheMemoize(self):
        """Testing cache_memoize"""
        cacheKey = "abc123"
        testStr = "Test 123"

        def cacheFunc(cacheCalled=[]):
            # The shared default list persists across calls, so the
            # assertion below fails if cacheFunc() is ever called twice.
            self.assert_(not cacheCalled)
            cacheCalled.append(True)
            return testStr

        result = cache_memoize(cacheKey, cacheFunc)
        self.assertEqual(result, testStr)

        # Call a second time. We should only call cacheFunc once.
        result = cache_memoize(cacheKey, cacheFunc)
        self.assertEqual(result, testStr)
Code example #15
File: views.py Project: vitormazzi/djblets
def view_feed(
    request, url, template_name="feedview/feed-page.html", cache_expiration=DEFAULT_EXPIRATION, extra_context={}
):
    """
    Renders an RSS or Atom feed using the given template. This will use
    a cached copy if available in order to reduce hits to the server.
    """

    def fetch_feed():
        from djblets.feedview import feedparser

        data = urllib2.urlopen(url).read()

        parser = feedparser.parse(data)

        context = {"parser": parser}
        context.update(extra_context)

        return render_to_string(template_name, RequestContext(request, context))

    try:
        return HttpResponse(
            cache_memoize("feed-%s" % url, fetch_feed, cache_expiration, force_overwrite=request.GET.has_key("reload"))
        )
    except urllib2.URLError, e:
        context = {"error": e}
        context.update(extra_context)

        return render_to_response(template_name, RequestContext(request, context))
Code example #16
File: mimetypes.py Project: prodigeni/reviewboard
    def get_thumbnail(self):
        """Returns the thumbnail of the text file, rendered as HTML."""
        # Caches the generated thumbnail to eliminate the need on each page
        # reload to:
        # 1) re-read the file attachment
        # 2) re-generate the html based on the data read
        return cache_memoize('file-attachment-thumbnail-%s-html-%s'
                             % (self.__class__.__name__, self.attachment.pk),
                             self._generate_thumbnail)
Code example #17
File: mimetypes.py Project: yangtina/reviewboard
    def get_thumbnail(self):
        """Returns the thumbnail of the text file, rendered as HTML."""
        # Caches the generated thumbnail to eliminate the need on each page
        # reload to:
        # 1) re-read the file attachment
        # 2) re-generate the html based on the data read
        return cache_memoize(
            'file-attachment-thumbnail-%s-html-%s' %
            (self.__class__.__name__, self.attachment.pk),
            self._generate_thumbnail)
Code example #18
File: models.py Project: cwest/reviewboard
    def get_branches(self):
        """Returns a list of branches."""
        hosting_service = self.hosting_service

        cache_key = make_cache_key('repository-branches:%s' % self.pk)
        if hosting_service:
            branches_callable = lambda: hosting_service.get_branches(self)
        else:
            branches_callable = self.get_scmtool().get_branches

        return cache_memoize(cache_key, branches_callable,
                             self.BRANCHES_CACHE_PERIOD)
Code example #19
File: models.py Project: cwest/reviewboard
    def get_file_exists(self, path, revision, request=None):
        """Returns whether or not a file exists in the repository.

        If the repository is backed by a hosting service, this will go
        through that. Otherwise, it will attempt to directly access the
        repository.

        The result of this call will be cached, making future lookups
        of this path and revision on this repository faster.
        """
        key = self._make_file_exists_cache_key(path, revision)

        if cache.get(make_cache_key(key)) == '1':
            return True

        exists = self._get_file_exists_uncached(path, revision, request)

        if exists:
            cache_memoize(key, lambda: '1')

        return exists
Code example #20
    def get_branches(self):
        """Returns a list of branches."""
        hosting_service = self.hosting_service

        cache_key = make_cache_key('repository-branches:%s' % self.pk)
        if hosting_service:
            branches_callable = lambda: hosting_service.get_branches(self)
        else:
            branches_callable = self.get_scmtool().get_branches

        return cache_memoize(cache_key, branches_callable,
                             self.BRANCHES_CACHE_PERIOD)
Code example #21
File: widgets.py Project: isakb/reviewboard
def get_groups(request):
    """Review group listing.

    Shows a list of recently created groups.
    """
    return {
        'size': 'widget-small',
        'template': 'admin/widgets/w-groups.html',
        'actions': [
            ('db/reviews/group/add/', _("Add")),
            ('db/reviews/group/', _("View All"))
        ],
        'data': cache_memoize("w-groups-" + str(datetime.date.today()),
                              lambda: Group.objects.all().order_by('-id')[:5]),
    }
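
Note the date-stamped cache key: str(datetime.date.today()) changes at midnight, so the memoized queryset is regenerated at most once per day without any explicit invalidation. The same pattern recurs in the widget examples further down. For instance:

import datetime

# 'w-groups-2013-05-01' today, 'w-groups-2013-05-02' tomorrow; yesterday's
# entry simply stops being looked up and ages out of the cache.
key = 'w-groups-' + str(datetime.date.today())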
Code example #22
File: views.py Project: mozfreddyb/djblets
def cached_javascript_catalog(request, domain='djangojs', packages=None):
    """A cached version of javascript_catalog."""
    global locale_serials

    package_str = '_'.join(packages)
    try:
        serial = locale_serials[package_str]
    except KeyError:
        serial = generate_locale_serial(packages)
        locale_serials[package_str] = serial

    return cache_memoize(
        'jsi18n-%s-%s-%s-%d' % (domain, package_str, get_language(), serial),
        lambda: javascript_catalog(request, domain, packages),
        large_data=True,
        compress_large_data=True)
Code example #23
File: renderers.py Project: offchooffcho/reviewboard
    def render_to_string(self):
        """Returns the diff as a string.

        The resulting diff may optimistically be pulled from the cache, if
        not rendering a custom line range. This makes diff rendering very
        quick.

        If operating with a cache, and the diff doesn't exist in the cache,
        it will be stored after render.
        """
        cache = not self.lines_of_context

        if cache:
            return cache_memoize(self.make_cache_key(),
                                 self.render_to_string_uncached)
        else:
            return self.render_to_string_uncached()
Code example #24
File: renderers.py Project: helloconan/reviewboard
    def render_to_string(self):
        """Returns the diff as a string.

        The resulting diff may optimistically be pulled from the cache, if
        not rendering a custom line range. This makes diff rendering very
        quick.

        If operating with a cache, and the diff doesn't exist in the cache,
        it will be stored after render.
        """
        cache = self.allow_caching and not self.lines_of_context

        if cache:
            return cache_memoize(self.make_cache_key(),
                                 self.render_to_string_uncached)
        else:
            return self.render_to_string_uncached()
Code example #25
File: widgets.py Project: justzx2011/reviewboard
    def render(self, request):
        """Renders a widget.

        This will render the HTML for a widget. It takes care of generating
        and caching the data, depending on the widget's needs.
        """
        if self.has_data and self.data is None:
            if self.cache_data:
                self.data = cache_memoize(self.generate_cache_key(request),
                                          lambda: self.generate_data(request))
            else:
                self.data = self.generate_data(request)

        return render_to_string('admin/admin_widget.html',
                                RequestContext(request, {
            'widget': self,
        }))
Code example #26
File: markdownui.py Project: prodigeni/reviewboard
    def get_comment_thumbnail(self, comment):
        """Get the "thumbnail" for a comment.

        This extracts the relevant paragraph that was commented on and returns
        it as HTML to be included in the list of reviews.
        """
        try:
            child_id = int(comment.extra_data['child_id'])
        except (KeyError, ValueError):
            # This may be a comment from before we had review UIs. Or,
            # corrupted data. Either way, we don't display anything.
            return None

        return cache_memoize(
            'markdown-attachment-comment-thumbnail-%d-%d' % (self.obj.pk,
                                                             child_id),
            lambda: self._get_thumbnail(child_id))
Code example #27
    def get_chunks(self):
        """Returns the chunks for the given diff information.

        If the file is binary or deleted, or if the file has moved with no
        additional changes, then an empty list of chunks will be returned.

        If there are chunks already computed in the cache, they will be
        returned. Otherwise, new chunks will be generated, stored in cache,
        and returned.
        """
        if (self.filediff.binary or self.filediff.deleted
                or self.filediff.source_revision == ''):
            return []

        return cache_memoize(self.make_cache_key(),
                             lambda: list(self._get_chunks_uncached()),
                             large_data=True)
Code example #28
File: widgets.py Project: yangtina/reviewboard
    def render(self, request):
        """Renders a widget.

        This will render the HTML for a widget. It takes care of generating
        and caching the data, depending on the widget's needs.
        """
        if self.has_data and self.data is None:
            if self.cache_data:
                self.data = cache_memoize(self.generate_cache_key(request),
                                          lambda: self.generate_data(request))
            else:
                self.data = self.generate_data(request)

        return render_to_string('admin/admin_widget.html',
                                RequestContext(request, {
                                    'widget': self,
                                }))
Code example #29
    def get_chunks(self):
        """Returns the chunks for the given diff information.

        If the file is binary or deleted, or if the file has moved with no
        additional changes, then an empty list of chunks will be returned.

        If there are chunks already computed in the cache, they will be
        returned. Otherwise, new chunks will be generated, stored in cache,
        and returned.
        """
        if (self.filediff.binary or
                self.filediff.deleted or
                self.filediff.source_revision == ''):
            return []

        return cache_memoize(self.make_cache_key(),
                             lambda: list(self._get_chunks_uncached()),
                             large_data=True)
Code example #30
def get_original_file(filediff):
    """
    Get a file either from the cache or the SCM, applying the parent diff if
    it exists.

    SCM exceptions are passed back to the caller.
    """
    data = ""

    if filediff.source_revision != PRE_CREATION:

        def fetch_file(file, revision):
            log_timer = log_timed("Fetching file '%s' r%s from %s" %
                                  (file, revision, repository))
            data = tool.get_file(file, revision)
            data = convert_line_endings(data)
            log_timer.done()
            return data

        repository = filediff.diffset.repository
        tool = repository.get_scmtool()
        file = filediff.source_file
        revision = filediff.source_revision

        key = "%s:%s:%s" % (urlquote(filediff.diffset.repository.path),
                            urlquote(file), urlquote(revision))

        # We wrap the result of get_file in a list and then return the first
        # element after getting the result from the cache. This prevents the
        # cache backend from converting to unicode, since we're no longer
        # passing in a string and the cache backend doesn't recursively look
        # through the list in order to convert the elements inside.
        #
        # Basically, this fixes the massive regressions introduced by the
        # Django unicode changes.
        data = cache_memoize(key,
                             lambda: [fetch_file(file, revision)],
                             large_data=True)[0]

    # If there's a parent diff set, apply it to the buffer.
    if filediff.parent_diff:
        data = patch(filediff.parent_diff, data, filediff.source_file)

    return data
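
The list-wrapping comment above can be made concrete. Per that comment (and specific to cache backends of the Python 2/old-Django era this code targets), string values may be coerced to unicode on retrieval, while the contents of a container are left untouched:

from django.core.cache import cache

cache.set('raw-file', '\x89binary')        # may come back as unicode,
                                           # corrupting non-UTF-8 data
cache.set('wrapped-file', ['\x89binary'])  # the backend does not recurse
                                           # into the list
data = cache.get('wrapped-file')[0]        # still the original byte string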
Code example #31
File: widgets.py Project: isakb/reviewboard
def get_repositories(request):
    """Shows a list of repositories in the system.

    This widget displays a table with the most recent repositories.
    """
    def repo_data():
        return Repository.objects.accessible(request.user).order_by('-id')[:3]

    key = "w-repositories-" + str(datetime.date.today())

    return {
        'size': 'widget-large',
        'template': 'admin/widgets/w-repositories.html',
        'actions': [
            ('db/scmtools/repository/add/', _("Add")),
            ('db/scmtools/repository/',  _("View All"), 'btn-right')
        ],
        'data': cache_memoize(key, repo_data)
    }
Code example #32
File: models.py Project: inratep/reviewboard
    def get_file(self, path, revision, request=None):
        """Returns a file from the repository.

        This will attempt to retrieve the file from the repository. If the
        repository is backed by a hosting service, it will go through that.
        Otherwise, it will attempt to directly access the repository.
        """
        # We wrap the result of get_file in a list and then return the first
        # element after getting the result from the cache. This prevents the
        # cache backend from converting to unicode, since we're no longer
        # passing in a string and the cache backend doesn't recursively look
        # through the list in order to convert the elements inside.
        #
        # Basically, this fixes the massive regressions introduced by the
        # Django unicode changes.
        return cache_memoize(
            self._make_file_cache_key(path, revision),
            lambda: [self._get_file_uncached(path, revision, request)],
            large_data=True)[0]
Code example #33
File: widgets.py Project: isakb/reviewboard
def get_user_activity_widget(request):
    """User activity widget.

    A pie chart of active application users based on their last login date.
    """
    def activity_data():
        now = datetime.date.today()
        users = User.objects

        week = datetime.timedelta(days=7)
        day = datetime.timedelta(days=1)
        month = datetime.timedelta(days=30)
        two_months = datetime.timedelta(days=60)
        three_months = datetime.timedelta(days=90)

        one_day = (now - week, now + day)
        seven_days = (now - month, now - week)
        thirty_days = (now - two_months, now - month)
        sixty_days = (now - three_months, now - two_months)
        ninety_days = now - three_months

        return {
            'now': users.filter(last_login__range=one_day).count(),
            'seven_days': users.filter(last_login__range=seven_days).count(),
            'thirty_days': users.filter(last_login__range=thirty_days).count(),
            'sixty_days': users.filter(last_login__range=sixty_days).count(),
            'ninety_days': users.filter(last_login__lte=ninety_days).count(),
            'total': users.count()
        }

    widget_actions = [
        ('db/auth/user/add/', _("Add New")),
        ('db/auth/user/', _("Manage Users"), 'btn-right')
    ]

    key = "w-user-activity-" + str(datetime.date.today())

    return {
        'size': 'widget-large',
        'template': 'admin/widgets/w-user-activity.html',
        'data': cache_memoize(key, activity_data),
        'actions': widget_actions
    }
Code example #34
File: models.py Project: cwest/reviewboard
    def get_file(self, path, revision, request=None):
        """Returns a file from the repository.

        This will attempt to retrieve the file from the repository. If the
        repository is backed by a hosting service, it will go through that.
        Otherwise, it will attempt to directly access the repository.
        """
        # We wrap the result of get_file in a list and then return the first
        # element after getting the result from the cache. This prevents the
        # cache backend from converting to unicode, since we're no longer
        # passing in a string and the cache backend doesn't recursively look
        # through the list in order to convert the elements inside.
        #
        # Basically, this fixes the massive regressions introduced by the
        # Django unicode changes.
        return cache_memoize(
            self._make_file_cache_key(path, revision),
            lambda: [self._get_file_uncached(path, revision, request)],
            large_data=True)[0]
Code example #35
def build_diff_fragment(request,
                        file,
                        chunkindex,
                        highlighting,
                        collapseall,
                        context,
                        template_name='diffviewer/diff_file_fragment.html'):
    key = "%s-%s-%s-" % (template_name, file['index'],
                         file['filediff'].diffset.revision)

    if file['force_interdiff']:
        if file['interfilediff']:
            key += 'interdiff-%s-%s' % (file['filediff'].id,
                                        file['interfilediff'].id)
        else:
            key += 'interdiff-%s-none' % file['filediff'].id
    else:
        key += str(file['filediff'].id)

    if chunkindex:
        chunkindex = int(chunkindex)
        if chunkindex < 0 or chunkindex >= len(file['chunks']):
            raise UserVisibleError(_(u"Invalid chunk index %s specified.") % \
                                   chunkindex)

        file['chunks'] = [file['chunks'][chunkindex]]
        key += '-chunk-%s' % chunkindex

    if collapseall:
        key += '-collapsed'
        context['collapseall'] = True

    if highlighting:
        key += '-highlighting'

    key += '-%s' % settings.AJAX_SERIAL

    context['file'] = file

    return cache_memoize(
        key, lambda: render_to_string(template_name,
                                      RequestContext(request, context)))
Code example #36
File: diffutils.py Project: ballarin/reviewboard
def get_original_file(filediff):
    """
    Get a file either from the cache or the SCM, applying the parent diff if
    it exists.

    SCM exceptions are passed back to the caller.
    """
    data = ""

    if filediff.source_revision != PRE_CREATION:
        def fetch_file(file, revision):
            log_timer = log_timed("Fetching file '%s' r%s from %s" %
                                  (file, revision, repository))
            data = tool.get_file(file, revision)
            data = convert_line_endings(data)
            log_timer.done()
            return data

        repository = filediff.diffset.repository
        tool = repository.get_scmtool()
        file = filediff.source_file
        revision = filediff.source_revision

        key = "%s:%s:%s" % (urlquote(filediff.diffset.repository.path),
                            urlquote(file), urlquote(revision))

        # We wrap the result of get_file in a list and then return the first
        # element after getting the result from the cache. This prevents the
        # cache backend from converting to unicode, since we're no longer
        # passing in a string and the cache backend doesn't recursively look
        # through the list in order to convert the elements inside.
        #
        # Basically, this fixes the massive regressions introduced by the
        # Django unicode changes.
        data = cache_memoize(key, lambda: [fetch_file(file, revision)],
                             large_data=True)[0]

    # If there's a parent diff set, apply it to the buffer.
    if filediff.parent_diff:
        data = patch(filediff.parent_diff, data, filediff.source_file)

    return data
Code example #37
File: widgets.py Project: isakb/reviewboard
def get_request_statuses(request):
    """Request statuses by percentage widget.

    A pie chart showing review request by status.
    """
    def status_data():
        request_objects = ReviewRequest.objects.all()

        return {
            'pending': request_objects.filter(status="P").count(),
            'draft': request_objects.filter(status="D").count(),
            'submit': request_objects.filter(status="S").count()
        }

    key = "w-request-statuses-" + str(datetime.date.today())

    return {
        'size': 'widget-small',
        'template': 'admin/widgets/w-request-statuses.html',
        'actions': '',
        'data': cache_memoize(key, status_data)
    }
Code example #38
def view_feed(request,
              url,
              template_name="feedview/feed-page.html",
              cache_expiration=DEFAULT_EXPIRATION,
              extra_context={}):
    """
    Renders an RSS or Atom feed using the given template. This will use
    a cached copy if available in order to reduce hits to the server.
    """
    def fetch_feed():
        from djblets.feedview import feedparser

        data = urllib2.urlopen(url).read()

        parser = feedparser.parse(data)

        context = {
            'parser': parser,
        }
        context.update(extra_context)

        return render_to_string(template_name,
                                RequestContext(request, context))

    try:
        return HttpResponse(
            cache_memoize("feed-%s" % url,
                          fetch_feed,
                          cache_expiration,
                          force_overwrite=request.GET.has_key("reload")))
    except urllib2.URLError, e:
        context = {
            'error': e,
        }
        context.update(extra_context)

        return render_to_response(template_name,
                                  RequestContext(request, context))
Code example #39
File: widgets.py Project: isakb/reviewboard
def get_stats(request):
    """Shows a list of totals for multiple database objects.

    Passes a count for Comments, Reviews and more to render a widget table.
    """
    def stats_data():
        return {
            'count_comments': Comment.objects.all().count(),
            'count_reviews': Review.objects.all().count(),
            'count_attachments': FileAttachment.objects.all().count(),
            'count_reviewdrafts': ReviewRequestDraft.objects.all().count(),
            'count_screenshots': Screenshot.objects.all().count(),
            'count_diffsets': DiffSet.objects.all().count()
        }

    key = "w-stats-" + str(datetime.date.today())

    return {
        'size': 'widget-small',
        'template': 'admin/widgets/w-stats.html',
        'actions': '',
        'data': cache_memoize(key, stats_data)
    }
Code example #40
def get_diff_files(diffset,
                   filediff=None,
                   interdiffset=None,
                   enable_syntax_highlighting=True,
                   load_chunks=True):
    if filediff:
        filediffs = [filediff]

        if interdiffset:
            log_timer = log_timed("Generating diff file info for "
                                  "interdiffset ids %s-%s, filediff %s" %
                                  (diffset.id, interdiffset.id, filediff.id))
        else:
            log_timer = log_timed("Generating diff file info for "
                                  "diffset id %s, filediff %s" %
                                  (diffset.id, filediff.id))
    else:
        filediffs = diffset.files.select_related().all()

        if interdiffset:
            log_timer = log_timed("Generating diff file info for "
                                  "interdiffset ids %s-%s" %
                                  (diffset.id, interdiffset.id))
        else:
            log_timer = log_timed("Generating diff file info for "
                                  "diffset id %s" % diffset.id)

    # A map used to quickly look up the equivalent interfilediff given a
    # source file.
    interdiff_map = {}
    if interdiffset:
        for interfilediff in interdiffset.files.all():
            if not filediff or \
               filediff.source_file == interfilediff.source_file:
                interdiff_map[interfilediff.source_file] = interfilediff

    key_prefix = "diff-sidebyside-"

    if enable_syntax_highlighting:
        key_prefix += "hl-"

    # In order to support interdiffs properly, we need to display diffs
    # on every file in the union of both diffsets. Iterating over one diffset
    # or the other doesn't suffice.
    #
    # We build a list of parts containing the source filediff, the interdiff
    # filediff (if specified), and whether to force showing an interdiff
    # (in the case where a file existed in the source filediff but was
    # reverted in the interdiff).
    filediff_parts = []

    for filediff in filediffs:
        interfilediff = None

        if filediff.source_file in interdiff_map:
            interfilediff = interdiff_map[filediff.source_file]
            del (interdiff_map[filediff.source_file])

        filediff_parts.append((filediff, interfilediff, interdiffset != None))

    if interdiffset:
        # We've removed everything in the map that we've already found.
        # What's left are interdiff files that are new. They have no file
        # to diff against.
        #
        # The end result is going to be a view that's the same as when you're
        # viewing a standard diff. As such, we can pretend the interdiff is
        # the source filediff and not specify an interdiff. Keeps things
        # simple, code-wise, since we really have no need to special-case
        # this.
        filediff_parts += [(interdiff, None, False)
                           for interdiff in interdiff_map.values()]

    files = []

    for parts in filediff_parts:
        filediff, interfilediff, force_interdiff = parts

        newfile = (filediff.source_revision == PRE_CREATION)

        if interdiffset:
            # First, find out if we want to even process this one.
            # We only process if there's a difference in files.

            if (filediff and interfilediff
                    and filediff.diff == interfilediff.diff):
                continue

            source_revision = "Diff Revision %s" % diffset.revision

            if not interfilediff and force_interdiff:
                dest_revision = "Diff Revision %s - File Reverted" % \
                                interdiffset.revision
            else:
                dest_revision = "Diff Revision %s" % interdiffset.revision
        else:
            source_revision = get_revision_str(filediff.source_revision)

            if newfile:
                dest_revision = NEW_FILE_STR
            else:
                dest_revision = NEW_CHANGE_STR

        i = filediff.source_file.rfind('/')

        if i != -1:
            basepath = filediff.source_file[:i]
            basename = filediff.source_file[i + 1:]
        else:
            basepath = ""
            basename = filediff.source_file

        tool = filediff.diffset.repository.get_scmtool()
        depot_filename = tool.normalize_path_for_display(filediff.source_file)
        dest_filename = tool.normalize_path_for_display(filediff.dest_file)

        file = {
            'depot_filename': depot_filename,
            'dest_filename': dest_filename or depot_filename,
            'basename': basename,
            'basepath': basepath,
            'revision': source_revision,
            'dest_revision': dest_revision,
            'filediff': filediff,
            'interfilediff': interfilediff,
            'force_interdiff': force_interdiff,
            'binary': filediff.binary,
            'deleted': filediff.deleted,
            'newfile': newfile,
            'index': len(files),
        }

        if load_chunks:
            chunks = []

            if not filediff.binary and not filediff.deleted:
                key = key_prefix

                if not force_interdiff:
                    key += str(filediff.id)
                elif interfilediff:
                    key += "interdiff-%s-%s" % (filediff.id, interfilediff.id)
                else:
                    key += "interdiff-%s-none" % filediff.id

                chunks = cache_memoize(
                    key,
                    lambda: list(
                        get_chunks(filediff.diffset, filediff, interfilediff,
                                   force_interdiff, enable_syntax_highlighting)
                    ),
                    large_data=True)

            file['chunks'] = chunks
            file['changed_chunk_indexes'] = []
            file['whitespace_only'] = True

            for j, chunk in enumerate(file['chunks']):
                chunk['index'] = j

                if chunk['change'] != 'equal':
                    file['changed_chunk_indexes'].append(j)
                    meta = chunk.get('meta', {})

                    if not meta.get('whitespace_chunk', False):
                        file['whitespace_only'] = False

            file['num_changes'] = len(file['changed_chunk_indexes'])

        files.append(file)

    def cmp_file(x, y):
        # Sort based on basepath in asc order
        if x["basepath"] != y["basepath"]:
            return cmp(x["basepath"], y["basepath"])

        # Sort based on filename in asc order, then based on extension in desc
        # order, to make *.h be ahead of *.c/cpp
        x_file, x_ext = os.path.splitext(x["basename"])
        y_file, y_ext = os.path.splitext(y["basename"])
        if x_file != y_file:
            return cmp(x_file, y_file)
        else:
            return cmp(y_ext, x_ext)

    files.sort(cmp_file)

    log_timer.done()

    return files
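
The reversed arguments in the comparator's final cmp(y_ext, x_ext) are what put *.h ahead of *.c/cpp for a shared basename. A standalone recreation of just that ordering (cmp is re-created because Python 3 dropped the builtin; the names are illustrative):

import os
from functools import cmp_to_key


def cmp(a, b):
    # Python 2's builtin cmp(), re-created for the sketch.
    return (a > b) - (a < b)


def cmp_basename(x, y):
    x_file, x_ext = os.path.splitext(x)
    y_file, y_ext = os.path.splitext(y)
    # Filename ascending, then extension descending.
    return cmp(x_file, y_file) or cmp(y_ext, x_ext)


names = ['util.c', 'util.h', 'api.cpp', 'api.h']
print(sorted(names, key=cmp_to_key(cmp_basename)))
# -> ['api.h', 'api.cpp', 'util.h', 'util.c']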
Code example #41
def get_diff_files(diffset, filediff=None, interdiffset=None, enable_syntax_highlighting=True, load_chunks=True):
    if filediff:
        filediffs = [filediff]

        if interdiffset:
            log_timer = log_timed(
                "Generating diff file info for "
                "interdiffset ids %s-%s, filediff %s" % (diffset.id, interdiffset.id, filediff.id)
            )
        else:
            log_timer = log_timed(
                "Generating diff file info for " "diffset id %s, filediff %s" % (diffset.id, filediff.id)
            )
    else:
        filediffs = diffset.files.all()

        if interdiffset:
            log_timer = log_timed(
                "Generating diff file info for " "interdiffset ids %s-%s" % (diffset.id, interdiffset.id)
            )
        else:
            log_timer = log_timed("Generating diff file info for " "diffset id %s" % diffset.id)

    # A map used to quickly look up the equivalent interfilediff given a
    # source file.
    interdiff_map = {}
    if interdiffset:
        for interfilediff in interdiffset.files.all():
            if not filediff or filediff.source_file == interfilediff.source_file:
                interdiff_map[interfilediff.source_file] = interfilediff

    key_prefix = "diff-sidebyside-"

    if enable_syntax_highlighting:
        key_prefix += "hl-"

    # In order to support interdiffs properly, we need to display diffs
    # on every file in the union of both diffsets. Iterating over one diffset
    # or the other doesn't suffice.
    #
    # We build a list of parts containing the source filediff, the interdiff
    # filediff (if specified), and whether to force showing an interdiff
    # (in the case where a file existed in the source filediff but was
    # reverted in the interdiff).
    filediff_parts = []

    for filediff in filediffs:
        interfilediff = None

        if filediff.source_file in interdiff_map:
            interfilediff = interdiff_map[filediff.source_file]
            del (interdiff_map[filediff.source_file])

        filediff_parts.append((filediff, interfilediff, interdiffset != None))

    if interdiffset:
        # We've removed everything in the map that we've already found.
        # What's left are interdiff files that are new. They have no file
        # to diff against.
        #
        # The end result is going to be a view that's the same as when you're
        # viewing a standard diff. As such, we can pretend the interdiff is
        # the source filediff and not specify an interdiff. Keeps things
        # simple, code-wise, since we really have no need to special-case
        # this.
        for interdiff in interdiff_map.values():
            filediff_parts.append((interdiff, None, False))

    files = []
    index = 0

    for parts in filediff_parts:
        filediff, interfilediff, force_interdiff = parts

        filediff_revision_str = get_revision_str(filediff.source_revision)
        newfile = filediff.source_revision == PRE_CREATION

        if interdiffset:
            # First, find out if we want to even process this one.
            # We only process if there's a difference in files.

            if filediff and interfilediff and filediff.diff == interfilediff.diff:
                continue

            source_revision = "Diff Revision %s" % diffset.revision

            if not interfilediff and force_interdiff:
                dest_revision = "Diff Revision %s - File Reverted" % interdiffset.revision
            else:
                dest_revision = "Diff Revision %s" % interdiffset.revision
        else:
            source_revision = get_revision_str(filediff.source_revision)

            if newfile:
                dest_revision = _("New File")
            else:
                dest_revision = _("New Change")

        i = filediff.source_file.rfind("/")

        if i != -1:
            basepath = filediff.source_file[:i]
            basename = filediff.source_file[i + 1 :]
        else:
            basepath = ""
            basename = filediff.source_file

        file = {
            "depot_filename": filediff.source_file,
            "basename": basename,
            "basepath": basepath,
            "revision": source_revision,
            "dest_revision": dest_revision,
            "filediff": filediff,
            "interfilediff": interfilediff,
            "force_interdiff": force_interdiff,
            "binary": filediff.binary,
            "newfile": newfile,
            "index": len(files),
        }

        if load_chunks:
            chunks = []

            if not filediff.binary:
                key = key_prefix

                if not force_interdiff:
                    key += str(filediff.id)
                elif interfilediff:
                    key += "interdiff-%s-%s" % (filediff.id, interfilediff.id)
                else:
                    key += "interdiff-%s-none" % filediff.id

                chunks = cache_memoize(
                    key,
                    lambda: get_chunks(
                        filediff.diffset, filediff, interfilediff, force_interdiff, enable_syntax_highlighting
                    ),
                    large_data=True,
                )

            file["chunks"] = chunks
            file["changed_chunks"] = []
            file["whitespace_only"] = True

            for j, chunk in enumerate(file["chunks"]):
                chunk["index"] = j
                if chunk["change"] != "equal":
                    file["changed_chunks"].append(chunk)
                    meta = chunk.get("meta", {})

                    if not meta.get("whitespace_chunk", False):
                        file["whitespace_only"] = False

            file["num_changes"] = len(file["changed_chunks"])

        files.append(file)

    def cmp_file(x, y):
        # Sort based on basepath in asc order
        if x["basepath"] != y["basepath"]:
            return cmp(x["basepath"], y["basepath"])
        # Sort based on filename in asc order, then based on extension in desc
        # order, to make *.h be ahead of *.c/cpp
        x_file, x_ext = os.path.splitext(x["basename"])
        y_file, y_ext = os.path.splitext(y["basename"])
        if x_file != y_file:
            return cmp(x_file, y_file)
        else:
            return cmp(y_ext, x_ext)

    files.sort(cmp_file)

    log_timer.done()

    return files
Code example #42
File: markdownui.py Project: prodigeni/reviewboard
    def render(self):
        """Render the document."""
        return cache_memoize('markdown-attachment-%d' % self.obj.pk,
                             self._render)
Code example #43
File: views.py Project: B-Rich/reviewboard
def build_diff_fragment(request, file, chunkindex, highlighting, collapseall,
                        lines_of_context, standalone=False, context=None,
                        template_name='diffviewer/diff_file_fragment.html'):
    if not context:
        context = {}

    cache = not lines_of_context
    key = ''

    if cache:
        filediff = file['filediff']
        key = "%s-%s-%s-" % (template_name, file['index'],
                             filediff.diffset.revision)

        if file['force_interdiff']:
            interfilediff = file['interfilediff']

            if interfilediff:
                key += 'interdiff-%s-%s' % (filediff.pk, interfilediff.pk)
            else:
                key += 'interdiff-%s-none' % filediff.pk
        else:
            key += str(filediff.pk)

    if chunkindex:
        chunkindex = int(chunkindex)
        num_chunks = len(file['chunks'])

        if chunkindex < 0 or chunkindex >= num_chunks:
            raise UserVisibleError(_(u"Invalid chunk index %s specified.") % \
                                   chunkindex)

        file['chunks'] = [file['chunks'][chunkindex]]

        if cache:
            key += '-chunk-%s' % chunkindex

        if lines_of_context:
            assert collapseall

            context['lines_of_context'] = lines_of_context

            chunk = file['chunks'][0]
            lines = chunk['lines']
            num_lines = len(lines)
            new_lines = []

            # If we only have one value, then assume it represents before
            # and after the collapsed header area.
            if len(lines_of_context) == 1:
                lines_of_context.append(lines_of_context[0])

            if lines_of_context[0] + lines_of_context[1] >= num_lines:
                # The lines of context we're expanding to would cover the
                # entire chunk, so just expand the entire thing.
                collapseall = False
            else:
                lines_of_context[0] = min(num_lines, lines_of_context[0])
                lines_of_context[1] = min(num_lines, lines_of_context[1])

                # The start of the collapsed header area.
                collapse_i = 0

                # Compute the start of the second chunk of code, after the
                # header.
                if chunkindex < num_chunks - 1:
                    chunk2_i = max(num_lines - lines_of_context[1], 0)
                else:
                    chunk2_i = num_lines

                if lines_of_context[0] and chunkindex > 0:
                    # The chunk of context preceding the header.
                    collapse_i = lines_of_context[0]
                    file['chunks'].insert(0, {
                        'change': chunk['change'],
                        'collapsable': False,
                        'index': chunkindex,
                        'lines': lines[:collapse_i],
                        'meta': chunk['meta'],
                        'numlines': collapse_i,
                    })

                # The header contents
                new_lines += lines[collapse_i:chunk2_i]

                if (chunkindex < num_chunks - 1 and
                    chunk2_i + lines_of_context[1] <= num_lines):
                    # The chunk of context after the header.
                    file['chunks'].append({
                        'change': chunk['change'],
                        'collapsable': False,
                        'index': chunkindex,
                        'lines': lines[chunk2_i:],
                        'meta': chunk['meta'],
                        'numlines': num_lines - chunk2_i,
                    })

                if new_lines:
                    numlines = len(new_lines)

                    chunk.update({
                        'lines': new_lines,
                        'numlines': numlines,
                        'collapsable': True,
                    })

                    # Fix the headers to accommodate the new range.
                    if chunkindex < num_chunks - 1:
                        for prefix, index in (('left', 1), ('right', 4)):
                            chunk['meta'][prefix + '_headers'] = [
                                header
                                for header in chunk['meta'][prefix + '_headers']
                                if header[0] <= new_lines[-1][index]
                            ]

                        chunk['meta']['headers'] = \
                            compute_chunk_last_header(new_lines, numlines,
                                                      chunk['meta'])
                else:
                    file['chunks'].remove(chunk)

    context.update({
        'collapseall': collapseall,
        'file': file,
        'lines_of_context': lines_of_context or (0, 0),
        'standalone': standalone,
    })

    def func():
        return render_to_string(template_name,
                                RequestContext(request, context))

    if cache:
        if collapseall:
            key += '-collapsed'

        if highlighting:
            key += '-highlighting'

        key += '-%s' % settings.AJAX_SERIAL

        return cache_memoize(key, func)
    else:
        return func()
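
A note on the caching above: the fragment key gets one suffix per rendering
option and is then versioned by settings.AJAX_SERIAL, so bumping the serial
invalidates every cached fragment at once. A minimal standalone sketch of the
same pattern (the names here are illustrative, not part of the original code):

def make_fragment_key(base_key, collapseall, highlighting, ajax_serial):
    # Each rendering variant gets its own suffix, and thus its own
    # cache slot; the serial versions the whole family of keys.
    if collapseall:
        base_key += '-collapsed'
    if highlighting:
        base_key += '-highlighting'
    return '%s-%s' % (base_key, ajax_serial)

rendered = cache_memoize(make_fragment_key('diff-fragment', True, False, 42),
                         lambda: 'rendered fragment HTML')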
Code example #44
File: diffutils.py Project: ballarin/reviewboard
def get_diff_files(diffset, filediff=None, interdiffset=None,
                   enable_syntax_highlighting=True,
                   load_chunks=True):
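    """Return display-ready info for each file in a diffset.

    If filediff is given, only that file is processed; otherwise all files
    in diffset are. When interdiffset is given, the result covers the
    union of both diffsets' files. Unless load_chunks is False, each
    file's chunk list is computed (and cached via cache_memoize).
    """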
    if filediff:
        filediffs = [filediff]

        if interdiffset:
            log_timer = log_timed("Generating diff file info for "
                                  "interdiffset ids %s-%s, filediff %s" %
                                  (diffset.id, interdiffset.id, filediff.id))
        else:
            log_timer = log_timed("Generating diff file info for "
                                  "diffset id %s, filediff %s" %
                                  (diffset.id, filediff.id))
    else:
        filediffs = diffset.files.select_related().all()

        if interdiffset:
            log_timer = log_timed("Generating diff file info for "
                                  "interdiffset ids %s-%s" %
                                  (diffset.id, interdiffset.id))
        else:
            log_timer = log_timed("Generating diff file info for "
                                  "diffset id %s" % diffset.id)


    # A map used to quickly look up the equivalent interfilediff given a
    # source file.
    interdiff_map = {}
    if interdiffset:
        for interfilediff in interdiffset.files.all():
            if (not filediff or
                    filediff.source_file == interfilediff.source_file):
                interdiff_map[interfilediff.source_file] = interfilediff
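    # e.g. (illustrative): interdiff_map now maps each source path in the
    # interdiffset to its FileDiff, like {'src/foo.c': <FileDiff>}.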

    key_prefix = "diff-sidebyside-"

    if enable_syntax_highlighting:
        key_prefix += "hl-"


    # In order to support interdiffs properly, we need to display diffs
    # on every file in the union of both diffsets. Iterating over one diffset
    # or the other doesn't suffice.
    #
    # We build a list of parts containing the source filediff, the interdiff
    # filediff (if specified), and whether to force showing an interdiff
    # (in the case where a file existed in the source filediff but was
    # reverted in the interdiff).
    filediff_parts = []

    for filediff in filediffs:
        interfilediff = None

        if filediff.source_file in interdiff_map:
            interfilediff = interdiff_map[filediff.source_file]
            del interdiff_map[filediff.source_file]

        filediff_parts.append((filediff, interfilediff,
                               interdiffset is not None))

    if interdiffset:
        # We've removed everything in the map that we've already found.
        # What's left are interdiff files that are new. They have no file
        # to diff against.
        #
        # The end result is going to be a view that's the same as when you're
        # viewing a standard diff. As such, we can pretend the interdiff is
        # the source filediff and not specify an interdiff. Keeps things
        # simple, code-wise, since we really have no need to special-case
        # this.
        filediff_parts += [(interdiff, None, False)
                           for interdiff in interdiff_map.values()]

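    # For illustration (hypothetical values): filediff_parts now holds
    # (filediff, interfilediff_or_None, force_interdiff) tuples, e.g.
    # [(fd_a, ifd_a, True), (fd_b, None, True), (new_ifd_c, None, False)].
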
    files = []

    for parts in filediff_parts:
        filediff, interfilediff, force_interdiff = parts

        newfile = (filediff.source_revision == PRE_CREATION)

        if interdiffset:
            # First, find out whether we even want to process this one.
            # We only process it when the two files actually differ.

            if (filediff and interfilediff and
                filediff.diff == interfilediff.diff):
                continue

            source_revision = "Diff Revision %s" % diffset.revision

            if not interfilediff and force_interdiff:
                dest_revision = "Diff Revision %s - File Reverted" % \
                                interdiffset.revision
            else:
                dest_revision = "Diff Revision %s" % interdiffset.revision
        else:
            source_revision = get_revision_str(filediff.source_revision)

            if newfile:
                dest_revision = NEW_FILE_STR
            else:
                dest_revision = NEW_CHANGE_STR

        i = filediff.source_file.rfind('/')

        if i != -1:
            basepath = filediff.source_file[:i]
            basename = filediff.source_file[i + 1:]
        else:
            basepath = ""
            basename = filediff.source_file

        tool = filediff.diffset.repository.get_scmtool()
        depot_filename = tool.normalize_path_for_display(filediff.source_file)
        dest_filename = tool.normalize_path_for_display(filediff.dest_file)

        file = {
            'depot_filename': depot_filename,
            'dest_filename': dest_filename or depot_filename,
            'basename': basename,
            'basepath': basepath,
            'revision': source_revision,
            'dest_revision': dest_revision,
            'filediff': filediff,
            'interfilediff': interfilediff,
            'force_interdiff': force_interdiff,
            'binary': filediff.binary,
            'deleted': filediff.deleted,
            'newfile': newfile,
            'index': len(files),
        }

        if load_chunks:
            chunks = []

            if not filediff.binary and not filediff.deleted:
                key = key_prefix

                if not force_interdiff:
                    key += str(filediff.id)
                elif interfilediff:
                    key += "interdiff-%s-%s" % (filediff.id, interfilediff.id)
                else:
                    key += "interdiff-%s-none" % filediff.id

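                # Note: the lambda below closes over this iteration's
                # filediff and interfilediff; that's safe here because
                # cache_memoize invokes it synchronously, before the
                # loop advances.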
                chunks = cache_memoize(
                    key,
                    lambda: list(get_chunks(filediff.diffset,
                                            filediff, interfilediff,
                                            force_interdiff,
                                            enable_syntax_highlighting)),
                    large_data=True)

            file['chunks'] = chunks
            file['changed_chunk_indexes'] = []
            file['whitespace_only'] = True

            for j, chunk in enumerate(file['chunks']):
                chunk['index'] = j

                if chunk['change'] != 'equal':
                    file['changed_chunk_indexes'].append(j)
                    meta = chunk.get('meta', {})

                    if not meta.get('whitespace_chunk', False):
                        file['whitespace_only'] = False

            file['num_changes'] = len(file['changed_chunk_indexes'])

        files.append(file)

    def cmp_file(x, y):
        # Sort by basepath in ascending order.
        if x["basepath"] != y["basepath"]:
            return cmp(x["basepath"], y["basepath"])

        # Sort by filename in ascending order, then by extension in
        # descending order, so that *.h files sort ahead of *.c/*.cpp files.
        x_file, x_ext = os.path.splitext(x["basename"])
        y_file, y_ext = os.path.splitext(y["basename"])
        if x_file != y_file:
            return cmp(x_file, y_file)
        else:
            return cmp(y_ext, x_ext)

    files.sort(cmp_file)

    log_timer.done()

    return files
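
Note: cmp_file relies on the Python 2 built-in cmp() and the cmp-style
list.sort() call, both of which were removed in Python 3. A minimal porting
sketch (a hedged illustration using only the standard library; _cmp is a
helper name introduced here, not part of the original code):

from functools import cmp_to_key

def _cmp(a, b):
    # Drop-in replacement for the removed Python 2 built-in cmp().
    return (a > b) - (a < b)

# With cmp_file rewritten to call _cmp() in place of cmp(), the sort becomes:
files.sort(key=cmp_to_key(cmp_file))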