Example #1
    def __init__(self, critic, repository, blob_sha1, file_obj):
        diffFile = diff.File(
            repository=repository._impl.getInternal(critic), path=file_obj.path,
            new_sha1=blob_sha1)
        diffFile.loadNewLines(
            highlighted=True, request_highlight=True, highlight_mode="json")
        self.__filecontents = diffFile.newLines(highlighted=True)
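
What this demonstrates: building a diff.File for a single blob and reading its
highlighted lines. A minimal sketch of the plain-text variant, using only calls
seen elsewhere on this page (`repository`, `path`, and `blob_sha1` are assumed
to be in scope; all names are illustrative):

# Sketch only: read a blob's contents without syntax highlighting.
plain_file = diff.File(repository=repository, path=path, new_sha1=blob_sha1)
plain_file.loadNewLines()                   # no highlighting requested
line_count = plain_file.newCount()          # lines on the new side
lines = plain_file.newLines(highlighted=False)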
Example #2
File: merge.py Project: yanlimin9/critic
def parseMergeDifferences(db, repository, commit):
    mergebase = gitutils.Commit.fromSHA1(db, repository,
                                         repository.mergebase(commit, db=db))

    result = {}
    log = [""]

    for parent_sha1 in commit.parents:
        parent = gitutils.Commit.fromSHA1(db, repository, parent_sha1)

        if parent_sha1 == mergebase:
            result[parent_sha1] = diff.parse.parseDifferences(
                repository, from_commit=parent, to_commit=commit)[parent_sha1]
        else:
            paths_on_branch = set(
                repository.run('diff', '--name-only',
                               "%s..%s" % (mergebase, parent)).splitlines())
            paths_in_merge = set(
                repository.run('diff', '--name-only',
                               "%s..%s" % (parent, commit)).splitlines())

            filter_paths = paths_on_branch & paths_in_merge

            on_branch = diff.parse.parseDifferences(
                repository,
                from_commit=mergebase,
                to_commit=parent,
                filter_paths=filter_paths)[mergebase.sha1]
            in_merge = diff.parse.parseDifferences(
                repository,
                from_commit=parent,
                to_commit=commit,
                filter_paths=filter_paths)[parent_sha1]

            files_on_branch = dict([(file.path, file) for file in on_branch])

            result_for_parent = []

            for file_in_merge in in_merge:
                file_on_branch = files_on_branch.get(file_in_merge.path)
                if file_on_branch:
                    filtered_chunks = filterChunks(log, file_on_branch,
                                                   file_in_merge,
                                                   file_in_merge.path)

                    if filtered_chunks:
                        result_for_parent.append(
                            diff.File(id=None,
                                      repository=repository,
                                      path=file_in_merge.path,
                                      old_sha1=file_in_merge.old_sha1,
                                      new_sha1=file_in_merge.new_sha1,
                                      old_mode=file_in_merge.old_mode,
                                      new_mode=file_in_merge.new_mode,
                                      chunks=filtered_chunks))

            result[parent_sha1] = result_for_parent

    return result
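
The return value maps each parent SHA-1 to a list of diff.File objects. A
hedged consumption sketch, assuming `db`, `repository`, and a merge `commit`
are already in hand:

# Sketch only: walk the per-parent differences computed above.
for parent_sha1, files in parseMergeDifferences(db, repository, commit).items():
    for changed_file in files:
        print("%s %s (%d chunks)" % (parent_sha1[:8], changed_file.path,
                                     len(changed_file.chunks)))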
Example #3
    def __getLegacyFile(self, critic):
        return diff.File(self.filechange.file.id,
                         self.filechange.file.path,
                         self.filechange.old_sha1,
                         self.filechange.new_sha1,
                         self.__repository._impl.getInternal(critic),
                         old_mode=self.filechange.old_mode,
                         new_mode=self.filechange.new_mode)
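
Read together with Examples #4 and #7, the positional constructor order
appears to be diff.File(id, path, old_sha1, new_sha1, repository, ...). A
hedged sketch; every concrete value below is an illustrative assumption:

# Sketch only: positional diff.File construction.
legacy_file = diff.File(42,                  # file id (assumed)
                        "src/example.py",    # path (assumed)
                        old_sha1,            # blob SHA-1 before the change
                        new_sha1,            # blob SHA-1 after the change
                        repository,
                        old_mode="100644",   # plain-file git mode (assumed)
                        new_mode="100644")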
Example #4
File: load.py Project: yanlimin9/critic
def loadChangesets(db, repository, changesets, filtered_file_ids=None, load_chunks=True):
    cursor = db.cursor()

    changeset_ids = [changeset.id for changeset in changesets]
    filtered_file_ids = list(filtered_file_ids) if filtered_file_ids else None

    if filtered_file_ids is None:
        cursor.execute("""SELECT changeset, file, path, old_sha1, new_sha1, old_mode, new_mode
                            FROM fileversions
                            JOIN files ON (files.id=fileversions.file)
                           WHERE changeset=ANY (%s)""",
                       (changeset_ids,))
    else:
        cursor.execute("""SELECT changeset, file, path, old_sha1, new_sha1, old_mode, new_mode
                            FROM fileversions
                            JOIN files ON (files.id=fileversions.file)
                           WHERE changeset=ANY (%s)
                             AND file=ANY (%s)""",
                       (changeset_ids, filtered_file_ids))

    files = dict([(changeset.id, {}) for changeset in changesets])

    for changeset_id, file_id, file_path, file_old_sha1, file_new_sha1, file_old_mode, file_new_mode in cursor.fetchall():
        files[changeset_id][file_id] = diff.File(file_id, file_path,
                                                 file_old_sha1, file_new_sha1,
                                                 repository,
                                                 old_mode=file_old_mode,
                                                 new_mode=file_new_mode,
                                                 chunks=[])

    if load_chunks:
        if filtered_file_ids is None:
            cursor.execute("""SELECT id, changeset, file, deleteOffset, deleteCount, insertOffset, insertCount, analysis, whitespace
                                FROM chunks
                                WHERE changeset=ANY (%s)
                                ORDER BY file, deleteOffset ASC""",
                           (changeset_ids,))
        else:
            cursor.execute("""SELECT id, changeset, file, deleteOffset, deleteCount, insertOffset, insertCount, analysis, whitespace
                                FROM chunks
                                WHERE changeset=ANY (%s)
                                  AND file=ANY (%s)
                                ORDER BY file, deleteOffset ASC""",
                           (changeset_ids, filtered_file_ids))

        for chunk_id, changeset_id, file_id, delete_offset, delete_count, insert_offset, insert_count, analysis, is_whitespace in cursor:
            files[changeset_id][file_id].chunks.append(diff.Chunk(delete_offset, delete_count,
                                                                  insert_offset, insert_count,
                                                                  id=chunk_id,
                                                                  is_whitespace=is_whitespace,
                                                                  analysis=analysis))

    for changeset in changesets:
        changeset.files = diff.File.sorted(files[changeset.id].values())

    return changesets
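
A minimal usage sketch, assuming `db`, a `repository`, and changeset objects
with `.id` attributes as the function expects:

# Sketch only: loadChangesets() attaches sorted diff.File lists in place.
for changeset in loadChangesets(db, repository, changesets):
    for file in changeset.files:
        print("%s: %d chunks" % (file.path, len(file.chunks)))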
Example #5
    def setCustom(self, review, commit, file_id, first_line, last_line):
        """
        Initialize for propagation of a custom location.

        This mode of operation is used to propagate a new comment chain to all
        relevant commits currently part of the review.

        Returns False if creating a comment at the specified location is not
        supported, typically because the commit is not being reviewed in the
        review.
        """

        assert first_line > 0
        assert last_line >= first_line

        if not review.containsCommit(self.db, commit, True):
            return False

        self.review = review
        self.rebases = review.getReviewRebases(self.db)
        self.initial_commit = commit
        self.addressed_by = []
        self.file_path = dbutils.describe_file(self.db, file_id)
        self.file_id = file_id
        self.location = Location(first_line, last_line)
        self.active = True

        file_entry = commit.getFileEntry(self.file_path)

        if file_entry is None:
            # File doesn't exist in the given commit.
            return False

        diff_file = diff.File(new_sha1=file_entry.sha1,
                              new_mode=file_entry.mode,
                              repository=review.repository)
        diff_file.loadNewLines()

        if last_line > diff_file.newCount():
            # Range of lines is out of bounds.
            return False

        self.all_lines = { file_entry.sha1: (first_line, last_line) }
        self.new_lines = { file_entry.sha1: (first_line, last_line) }

        return True
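
A hedged sketch of driving this method; the enclosing propagation class and
its constructor are assumptions here:

# Sketch only: propagate a new comment chain covering lines 10-20.
propagation = Propagation(db)  # class name assumed
if propagation.setCustom(review, commit, file_id, 10, 20):
    lines_by_sha1 = propagation.new_lines  # set by setCustom() above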
Example #6
    def process(self, db, user, repository_id, path, sha1, ranges, tabify):
        repository = gitutils.Repository.fromId(db, repository_id)
        cursor = db.cursor()

        def getContext(offset):
            cursor.execute(
                """SELECT context
                     FROM codecontexts
                    WHERE sha1=%s
                      AND %s BETWEEN first_line AND last_line
                 ORDER BY first_line DESC
                    LIMIT 1""", (sha1, offset))

            row = cursor.fetchone()

            if row: return row[0]
            else: return None

        file = diff.File(repository=repository, path=path, new_sha1=sha1)
        file.loadNewLines(highlighted=True, request_highlight=True)

        if tabify:
            tabwidth = file.getTabWidth()
            indenttabsmode = file.getIndentTabsMode()

        def processRange(offset, count, context):
            if context: context = getContext(offset)
            else: context = None

            # Offset is a 1-based line number.
            start = offset - 1
            # If count is -1, fetch all lines.
            end = start + count if count > -1 else None

            lines = file.newLines(highlighted=True)[start:end]

            if tabify:
                lines = [
                    htmlutils.tabify(line, tabwidth, indenttabsmode)
                    for line in lines
                ]

            return {"lines": lines, "context": context}

        return OperationResult(
            ranges=[processRange(**line_range) for line_range in ranges])
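
Each entry in `ranges` is unpacked into processRange(offset, count, context),
so it must be a mapping with exactly those keys. A hedged call sketch; the
operation instance and all concrete values are assumptions:

# Sketch only: lines 10-29 with context lookup, then line 100 to end of file.
result = operation.process(
    db, user, repository_id=1, path="src/main.py", sha1=blob_sha1,
    ranges=[{"offset": 10, "count": 20, "context": True},
            {"offset": 100, "count": -1, "context": False}],
    tabify=False)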
Example #7
File: html.py Project: yanlimin9/critic
def renderCodeCommentChain(db,
                           target,
                           user,
                           review,
                           chain,
                           context_lines=3,
                           compact=False,
                           tabify=False,
                           original=False,
                           changeset=None,
                           linkify=False):
    repository = review.repository

    old_sha1 = None
    new_sha1 = None

    old = 1
    new = 2

    cursor = db.cursor()

    file_id = chain.file_id
    file_path = dbutils.describe_file(db, file_id)

    if (chain.state != "addressed"
            or original) and chain.first_commit == chain.last_commit:
        sha1 = chain.first_commit.getFileSHA1(file_path)

        cursor.execute(
            "SELECT first_line, last_line FROM commentchainlines WHERE chain=%s AND sha1=%s",
            (chain.id, sha1))
        first_line, last_line = cursor.fetchone()

        file = diff.File(file_id,
                         file_path,
                         sha1,
                         sha1,
                         review.repository,
                         chunks=[])
        file.loadNewLines(True)

        start = max(1, first_line - context_lines)
        end = min(file.newCount(), last_line + context_lines)
        count = end + 1 - start

        lines = file.newLines(True)
        lines = [
            diff.Line(diff.Line.CONTEXT, start + index,
                      lines[start + index - 1], start + index,
                      lines[start + index - 1]) for index in range(count)
        ]

        file.macro_chunks = [diff.MacroChunk([], lines)]

        use = new
        display_type = "new"
        commit_url_component = "sha1=%s" % chain.first_commit.sha1
    else:
        if chain.state == "addressed" and not original and review.containsCommit(
                db, chain.addressed_by):
            parent = gitutils.Commit.fromSHA1(db, review.repository,
                                              chain.addressed_by.parents[0])
            child = chain.addressed_by
            use = old
        else:
            parent = chain.first_commit
            child = chain.last_commit

            if parent == child:
                if chain.origin == "old":
                    cursor.execute(
                        """SELECT changesets.child
                             FROM changesets, reviewchangesets
                            WHERE changesets.parent=%s
                              AND reviewchangesets.changeset=changesets.id
                              AND reviewchangesets.review=%s""",
                        [child.getId(db), review.id])

                    try:
                        child = gitutils.Commit.fromId(db, repository,
                                                       cursor.fetchone()[0])
                    except:
                        parent = gitutils.Commit.fromSHA1(
                            db, repository, child.parents[0])
                else:
                    parent = gitutils.Commit.fromSHA1(db, repository,
                                                      child.parents[0])

            if chain.origin == "old": use = old
            else: use = new

        if parent.sha1 in child.parents and len(child.parents) == 1:
            commit = child
            from_commit = None
            to_commit = None
        else:
            commit = None
            from_commit = parent
            to_commit = child

        if changeset:
            assert ((changeset.parent == from_commit
                     and changeset.child == to_commit) if commit is None else
                    (changeset.parent.sha1 == commit.parents[0]
                     and changeset.child == commit))
            assert changeset.getFile(file_id)
        else:
            changeset = changeset_utils.createChangeset(
                db,
                user,
                repository,
                commit=commit,
                from_commit=from_commit,
                to_commit=to_commit,
                filtered_file_ids=set((file_id, )))[0]

        file = changeset.getFile(file_id)

        if not file:
            if chain.state == "addressed" and not original:
                renderCodeCommentChain(db,
                                       target,
                                       user,
                                       review,
                                       chain,
                                       context_lines,
                                       compact,
                                       tabify,
                                       original=True)
                return
            else:
                raise Exception("no changed file %d in changeset" % file_id)

        # Commit so that the diff and its analysis, written to the database by createChangeset(),
        # can be reused later.
        db.commit()

        old_sha1 = file.old_sha1
        new_sha1 = file.new_sha1

        if use == old and old_sha1 == '0' * 40: use = new
        elif use == new and new_sha1 == '0' * 40: use = old

        if use == old: sha1 = old_sha1
        else: sha1 = new_sha1

        cursor.execute(
            "SELECT first_line, last_line FROM commentchainlines WHERE chain=%s AND sha1=%s",
            [chain.id, sha1])

        first_line, last_line = cursor.fetchone()

        def readChunks():
            return [
                diff.Chunk(delete_offset,
                           delete_count,
                           insert_offset,
                           insert_count,
                           analysis=analysis,
                           is_whitespace=is_whitespace)
                for delete_offset, delete_count, insert_offset, insert_count,
                analysis, is_whitespace in cursor.fetchall()
            ]

        first_context_line = first_line - context_lines
        last_context_line = last_line + context_lines

        def includeChunk(chunk):
            if use == old:
                chunk_first_line, chunk_last_line = chunk.delete_offset, chunk.delete_offset + chunk.delete_count - 1
            else:
                chunk_first_line, chunk_last_line = chunk.insert_offset, chunk.insert_offset + chunk.insert_count - 1

            return chunk_last_line >= first_context_line and chunk_first_line <= last_context_line

        def lineFilter(line):
            if use == old:
                linenr = line.old_offset
                if linenr == first_context_line and line.type == diff.Line.INSERTED:
                    return False
            else:
                linenr = line.new_offset
                if linenr == first_context_line and line.type == diff.Line.DELETED:
                    return False

            return first_context_line <= linenr <= last_context_line

        file.loadOldLines(True)
        file.loadNewLines(True)

        context = diff.context.ContextLines(file, file.chunks,
                                            [(chain, use == old)])
        file.macro_chunks = context.getMacroChunks(context_lines,
                                                   highlight=True,
                                                   lineFilter=lineFilter)

        try:
            macro_chunk = file.macro_chunks[0]
        except IndexError:
            raise Exception(repr((parent.sha1, child.sha1)))

        display_type = "both"

        if chain.state != "addressed":
            first_line_type = macro_chunk.lines[0].type
            if first_line_type == diff.Line.CONTEXT or (
                    use == old and first_line_type == diff.Line.DELETED) or (
                        use == new and first_line_type == diff.Line.INSERTED):
                for line in macro_chunk.lines[1:]:
                    if first_line_type != line.type:
                        break
                else:
                    display_type = "old" if use == old else "new"

        commit_url_component = "from=%s&to=%s" % (parent.sha1, child.sha1)

    def renderHeaderLeft(db, target, file):
        target.span("comment-chain-title").a(href="/showcomment?chain=%d" %
                                             chain.id).text(chain.title())

    def renderHeaderRight(db, target, file):
        side = "o" if use == old else "n"
        uri = "showcommit?%s&review=%d&file=%d#f%d%s%d" % (
            commit_url_component, review.id, file.id, file.id, side,
            first_line)
        target.span("filename").a(href=uri).text(file.path)

    def renderCommentsLocal(db, target, **kwargs):
        if display_type == "both":
            if use == old: position = "left"
            else: position = "right"
        else:
            position = "center"

        renderComments(db, target, user, chain, position, linkify)

    def lineId(base):
        return "c%d%s" % (chain.id, base)

    def lineCellId(base):
        return "c%d%s" % (chain.id, base)

    target.addInternalScript("commentChainById[%d] = %s;" %
                             (chain.id, chain.getJSConstructor(sha1)),
                             here=True)

    changeset_html.renderFile(db,
                              target,
                              user,
                              review,
                              file,
                              options={
                                  "support_expand": False,
                                  "display_type": display_type,
                                  "header_left": renderHeaderLeft,
                                  "header_right": renderHeaderRight,
                                  "content_after": renderCommentsLocal,
                                  "show": True,
                                  "expand": True,
                                  "line_id": lineId,
                                  "line_cell_id": lineCellId,
                                  "compact": compact,
                                  "tabify": tabify,
                                  "include_deleted": True
                              })

    side = "o" if use == old else "n"
    data = (chain.id, file_id, side, first_line,
            chain.id, file_id, side, last_line,
            htmlutils.jsify(chain.type), htmlutils.jsify(chain.state),
            chain.id)

    target.addInternalScript("""$(document).ready(function ()
  {
    var markers = new CommentMarkers(null);
    markers.setLines(document.getElementById('c%df%d%s%d'), document.getElementById('c%df%d%s%d'));
    markers.setType(%s, %s);
    commentChainById[%d].markers = markers;
  });""" % data,
                             here=True)
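
A hedged invocation sketch. Note the function calls itself again with
original=True when an addressed chain's file is missing from the changeset:

# Sketch only: render one code comment chain into an HTML target.
renderCodeCommentChain(db, target, user, review, chain,
                       context_lines=3, compact=False, tabify=False)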
Example #8
def detectMoves(db, changeset, source_file_ids=None, target_file_ids=None):
    moves = []

    for target_file in changeset.files:
        if target_file_ids and target_file.id not in target_file_ids: continue

        current_chunks = target_file.chunks

        count = 0
        log = ""

        while current_chunks:
            extra_target_chunks = []
            count += 1

            for target_chunk in current_chunks:
                # White-space only changes; unlikely target of moved code.
                if target_chunk.is_whitespace:
                    continue

                # Too few inserted lines; couldn't possibly be an interesting target
                # of moved code.
                if target_chunk.insert_count < 5:
                    continue

                if target_chunk.analysis:
                    # If more than half the inserted lines are mapped against
                    # deleted lines, most likely edited rather than moved code.
                    if target_chunk.insert_count < len(
                            target_chunk.analysis.split(";")) * 2:
                        continue

                target_file.loadNewLines()
                target_chunk.inserted_lines = target_file.getNewLines(
                    target_chunk)

                source_file, chunk = findSourceChunk(db, changeset,
                                                     source_file_ids,
                                                     target_file, target_chunk,
                                                     extra_target_chunks)

                if source_file and chunk:
                    moves.append((source_file, target_file, chunk))
                    continue

            current_chunks = extra_target_chunks

    if moves:

        def orderChunks(a, b):
            a_source_file, a_target_file, a_chunk = a
            b_source_file, b_target_file, b_chunk = b

            c = cmp(a_target_file.path, b_target_file.path)
            if c != 0: return c
            else: return cmp(a_chunk.insert_offset, b_chunk.insert_offset)

        moves.sort(orderChunks)

        move_changeset = diff.Changeset(None, changeset.parent,
                                        changeset.child, 'moves', [])

        for source_file, target_file, chunk in moves:
            move_file = diff.File(0,
                                  "",
                                  source_file.old_sha1,
                                  target_file.new_sha1,
                                  source_file.repository,
                                  chunks=[chunk],
                                  move_source_file=source_file,
                                  move_target_file=target_file)

            move_changeset.files.append(move_file)

        return move_changeset
    else:
        return None
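
A hedged sketch of consuming the synthetic "moves" changeset (or None) that
detectMoves() returns:

# Sketch only: each file in the moves changeset records source and target.
move_changeset = detectMoves(db, changeset)
if move_changeset is not None:
    for move in move_changeset.files:
        print("%s -> %s" % (move.move_source_file.path,
                            move.move_target_file.path))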
Example #9
def renderShowFile(req, db, user):
    cursor = db.cursor()

    sha1 = req.getParameter("sha1")
    path = req.getParameter("path")
    line = req.getParameter("line", None)
    review_id = req.getParameter("review", None, filter=int)

    default_tabify = "yes" if user.getPreference(
        db, "commit.diff.visualTabs") else "no"
    tabify = req.getParameter("tabify", default_tabify) == "yes"

    if line is None:
        first, last = None, None
    else:
        if "-" in line:
            first, last = map(int, line.split("-"))
        else:
            first = last = int(line)

        context = req.getParameter(
            "context", user.getPreference(db, "commit.diff.contextLines"), int)

        first_with_context = max(1, first - context)
        last_with_context = last + context

    if user.getPreference(db, "commit.diff.compactMode"):
        default_compact = "yes"
    else:
        default_compact = "no"

    compact = req.getParameter("compact", default_compact) == "yes"

    if len(path) == 0 or path[-1:] == "/":
        raise page.utils.DisplayMessage(
            title="Invalid path parameter",
            body=("<p>The path must be non-empty and must not end with a "
                  "<code>/</code>.</p>"),
            html=True)
    if path[0] == '/':
        full_path = path
        if path != "/": path = path[1:]
    else:
        full_path = "/" + path
        if not path: path = "/"

    if review_id is None:
        review = None
        repository_arg = req.getParameter("repository", "")
        if repository_arg:
            repository = gitutils.Repository.fromParameter(db, repository_arg)
        else:
            repository = gitutils.Repository.fromSHA1(db, sha1)
    else:
        review = dbutils.Review.fromId(db, review_id)
        repository = review.repository

    document = htmlutils.Document(req)

    html = document.html()
    head = html.head()
    body = html.body()

    if review:
        page.utils.generateHeader(body,
                                  db,
                                  user,
                                  lambda target: review_utils.renderDraftItems(
                                      db, user, review, target),
                                  extra_links=[("r/%d" % review.id,
                                                "Back to Review")])
    else:
        page.utils.generateHeader(body, db, user)

    document.addExternalStylesheet("resource/showfile.css")
    document.addInternalStylesheet(
        htmlutils.stripStylesheet(
            user.getResource(db, "syntax.css")[1], compact))

    commit = gitutils.Commit.fromSHA1(db, repository, sha1)
    file_sha1 = commit.getFileSHA1(full_path)
    file_id = dbutils.find_file(db, path=path)

    if file_sha1 is None:
        raise page.utils.DisplayMessage(
            title="File does not exist",
            body=("<p>There is no file named <code>%s</code> in the commit "
                  "<a href='/showcommit?repository=%s&amp;sha1=%s'>"
                  "<code>%s</code></a>.</p>" %
                  (htmlutils.htmlify(textutils.escape(full_path)),
                   htmlutils.htmlify(repository.name), htmlutils.htmlify(sha1),
                   htmlutils.htmlify(sha1[:8]))),
            html=True)

    file = diff.File(file_id, path, None, file_sha1, repository)

    # A new file ID might have been added to the database, so need to commit.
    db.commit()

    if file.canHighlight():
        requestHighlights(repository,
                          {file.new_sha1: (file.path, file.getLanguage())})

    file.loadNewLines(True, request_highlight=True)

    if review:
        document.addInternalScript(user.getJS())
        document.addInternalScript(review.getJS())
        document.addInternalScript(
            "var changeset = { parent: { id: %(id)d, sha1: %(sha1)r }, child: { id: %(id)d, sha1: %(sha1)r } };"
            % {
                'id': commit.getId(db),
                'sha1': commit.sha1
            })
        document.addInternalScript(
            "var files = { %(id)d: { new_sha1: %(sha1)r }, %(sha1)r: { id: %(id)d, side: 'n' } };"
            % {
                'id': file_id,
                'sha1': file_sha1
            })
        document.addExternalStylesheet("resource/review.css")
        document.addExternalScript("resource/review.js")

        cursor.execute(
            """SELECT DISTINCT commentchains.id
                 FROM commentchains
                 JOIN commentchainlines ON (commentchainlines.chain=commentchains.id)
                WHERE commentchains.review=%s
                  AND commentchains.file=%s
                  AND commentchainlines.sha1=%s
                  AND ((commentchains.state!='draft' OR commentchains.uid=%s)
                  AND commentchains.state!='empty')
             GROUP BY commentchains.id""",
            (review.id, file_id, file_sha1, user.id))

        comment_chain_script = ""

        for (chain_id, ) in cursor.fetchall():
            chain = review_comment.CommentChain.fromId(db,
                                                       chain_id,
                                                       user,
                                                       review=review)
            chain.loadComments(db, user)

            comment_chain_script += "commentChains.push(%s);\n" % chain.getJSConstructor(
                file_sha1)

        if comment_chain_script:
            document.addInternalScript(comment_chain_script)

    document.addExternalStylesheet("resource/comment.css")
    document.addExternalScript("resource/comment.js")
    document.addExternalScript("resource/showfile.js")

    if tabify:
        document.addExternalStylesheet("resource/tabify.css")
        document.addExternalScript("resource/tabify.js")
        tabwidth = file.getTabWidth()
        indenttabsmode = file.getIndentTabsMode()

    if user.getPreference(db, "commit.diff.highlightIllegalWhitespace"):
        document.addInternalStylesheet(
            user.getResource(db, "whitespace.css")[1], compact)

    if first is not None:
        document.addInternalScript(
            "var firstSelectedLine = %d, lastSelectedLine = %d;" %
            (first, last))

    target = body.div("main")

    if tabify:
        target.script(type="text/javascript").text("calculateTabWidth();")

    table = target.table('file show expanded paleyellow',
                         align='center',
                         cellspacing=0)

    columns = table.colgroup()
    columns.col('edge')
    columns.col('linenr')
    columns.col('line')
    columns.col('middle')
    columns.col('middle')
    columns.col('line')
    columns.col('linenr')
    columns.col('edge')

    thead = table.thead()
    cell = thead.tr().td('h1', colspan=8)
    h1 = cell.h1()

    def make_url(url_path, path):
        params = {"sha1": sha1, "path": path}
        if review is None:
            params["repository"] = str(repository.id)
        else:
            params["review"] = str(review.id)
        return "%s?%s" % (url_path, urllib.urlencode(params))

    h1.a("root", href=make_url("showtree", "/")).text("root")
    h1.span().text('/')

    components = path.split("/")
    for index, component in enumerate(components[:-1]):
        h1.a(href=make_url("showtree", "/".join(components[:index + 1]))).text(
            component, escape=True)
        h1.span().text('/')

    if first is not None:
        h1.a(href=make_url("showfile", "/".join(components))).text(
            components[-1], escape=True)
    else:
        h1.text(components[-1], escape=True)

    h1.span("right").a(href=("/download/%s?repository=%s&sha1=%s" %
                             (urllib.quote(path), repository.name, file_sha1)),
                       download=urllib.quote(path)).text("[download]")
    h1.span("right").a(
        href=("/download/%s?repository=%s&sha1=%s" %
              (urllib.quote(path), repository.name, file_sha1))).text("[view]")

    table.tbody('spacer top').tr('spacer top').td(colspan=8).text()

    tbody = table.tbody("lines")

    yield document.render(stop=tbody, pretty=not compact)

    for linenr, line in enumerate(file.newLines(True)):
        linenr = linenr + 1
        highlight_class = ""

        if first is not None:
            if not (first_with_context <= linenr <= last_with_context):
                continue
            if linenr == first:
                highlight_class += " first-selected"
            if linenr == last:
                highlight_class += " last-selected"

        if tabify:
            line = htmlutils.tabify(line, tabwidth, indenttabsmode)

        line = line.replace("\r", "<i class='cr'></i>")

        row = tbody.tr("line context single",
                       id="f%do%dn%d" % (file.id, linenr, linenr))
        row.td("edge").text()
        row.td("linenr old").text(linenr)
        row.td("line single whole%s" % highlight_class,
               id="f%dn%d" % (file.id, linenr),
               colspan=4).innerHTML(line)
        row.td("linenr new").text(linenr)
        row.td("edge").text()

        if linenr % 500 == 0:
            yield document.render(stop=tbody, pretty=not compact)

    table.tbody('spacer bottom').tr('spacer bottom').td(colspan=8).text()

    yield document.render(pretty=not compact)
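
renderShowFile() is a generator that yields progressively rendered HTML, so a
caller streams each fragment; a hedged sketch (the request object's write
method is an assumption):

# Sketch only: stream the incrementally rendered page to the client.
for fragment in renderShowFile(req, db, user):
    req.write(fragment)  # req.write() is assumed here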
Example #10
def parseDifferences(repository,
                     commit=None,
                     from_commit=None,
                     to_commit=None,
                     filter_paths=None,
                     selected_path=None,
                     simple=False):
    """parseDifferences(repository, [commit] | [from_commit, to_commit][, selected_path]) =>
         dict(parent_sha1 => [diff.File, ...] (if selected_path is None)
         diff.File                            (if selected_path is not None)"""

    options = []

    if from_commit and to_commit:
        command = 'diff'
        what = from_commit.sha1 + ".." + to_commit.sha1
    elif not commit.parents:
        # Root commit.

        command = "show"
        what = commit.sha1

        options.append("--pretty=format:")
    else:
        assert len(commit.parents) == 1

        command = 'diff'
        what = commit.parents[0] + '..' + commit.sha1

    if filter_paths is None and selected_path is None and not simple:
        names = repository.run(command, *(options + ["--name-only", what]))
        paths = set(filter(None, map(str.strip, names.splitlines())))
    else:
        paths = set()

    if not simple:
        options.append('--ignore-space-change')

    options.append(what)

    if filter_paths is not None:
        options.append('--')
        options.extend(filter_paths)
    elif selected_path is not None:
        options.append('--')
        options.append(selected_path)

    stdout = repository.run(command, '--full-index', '--unified=1',
                            '--patience', *options)
    selected_file = None

    re_chunk = re.compile('^@@ -(\\d+)(?:,\\d+)? \\+(\\d+)(?:,\\d+)? @@')
    re_binary = re.compile(
        '^Binary files (?:a/(.+)|/dev/null) and (?:b/(.+)|/dev/null) differ')
    re_diff = re.compile("^diff --git a/(.*) b/(.*)$")

    def isplitlines(text):
        start = 0
        length = len(text)

        while start < length:
            try:
                end = text.index('\n', start)
                yield text[start:end]
                start = end + 1
            except ValueError:
                yield text[start:]
                break

    lines = isplitlines(stdout)

    included = set()
    files = []
    files_by_path = {}

    def addFile(new_file):
        assert new_file.path not in files_by_path
        files.append(new_file)
        files_by_path[new_file.path] = new_file
        included.add(new_file.path)

    old_mode = None
    new_mode = None

    try:
        line = lines.next()

        names = None

        while True:
            old_mode = None
            new_mode = None

            # Scan to the 'index <sha1>..<sha1>' line that marks the beginning
            # of the differences in one file.
            while not line.startswith("index "):
                match = re_diff.match(line)
                if match:
                    if old_mode is not None and new_mode is not None:
                        addFile(
                            diff.File(None,
                                      names[0],
                                      None,
                                      None,
                                      repository,
                                      old_mode=old_mode,
                                      new_mode=new_mode,
                                      chunks=[]))
                    names = (match.group(1), match.group(2))
                elif line.startswith("old mode "):
                    old_mode = line[9:]
                elif line.startswith("new mode "):
                    new_mode = line[9:]
                elif line.startswith("new file mode "):
                    new_mode = line[14:]
                elif line.startswith("deleted file mode "):
                    old_mode = line[18:]

                line = lines.next()

            is_submodule = False

            try:
                sha1range, mode = line[6:].split(' ', 2)
                if mode == "160000":
                    is_submodule = True
                    old_mode = new_mode = mode
                old_sha1, new_sha1 = sha1range.split('..')
            except:
                old_sha1, new_sha1 = line[6:].split(' ', 1)[0].split("..")

            try:
                line = lines.next()
            except:
                if new_mode is not None:
                    assert names[0] == names[1]

                    addFile(
                        diff.File(None,
                                  names[0],
                                  old_sha1,
                                  new_sha1,
                                  repository,
                                  old_mode=old_mode,
                                  new_mode=new_mode,
                                  chunks=[diff.Chunk(0, 0, 0, 0)]))

                    old_mode = new_mode = None

            if re_diff.match(line):
                new_file = diff.File(None,
                                     names[0] or names[1],
                                     old_sha1,
                                     new_sha1,
                                     repository,
                                     old_mode=old_mode,
                                     new_mode=new_mode)

                if '0' * 40 == old_sha1 or '0' * 40 == new_sha1:
                    new_file.chunks = [diff.Chunk(0, 0, 0, 0)]
                else:
                    new_file.loadOldLines()
                    new_file.loadNewLines()
                    new_file.chunks = []

                    detectWhiteSpaceChanges(new_file, new_file.oldLines(False),
                                            1,
                                            new_file.oldCount() + 1, True,
                                            new_file.newLines(False), 1,
                                            new_file.newCount() + 1, True)

                addFile(new_file)

                old_mode = new_mode = None

                continue

            binary = re_binary.match(line)
            if binary:
                path = (binary.group(1) or binary.group(2)).strip()

                new_file = diff.File(None,
                                     path,
                                     old_sha1,
                                     new_sha1,
                                     repository,
                                     old_mode=old_mode,
                                     new_mode=new_mode)
                new_file.chunks = [diff.Chunk(0, 0, 0, 0)]

                addFile(new_file)

                continue

            if line.startswith("--- a/"): old_path = line[6:].strip()
            else: old_path = None

            line = lines.next()

            if line.startswith("+++ b/"): new_path = line[6:].strip()
            else: new_path = None

            assert (old_path is None) == ('0' * 40 == old_sha1)
            assert (new_path is None) == ('0' * 40 == new_sha1)

            if old_path: path = old_path
            else: path = new_path

            if is_submodule:
                line = lines.next()
                match = re_chunk.match(line)
                assert match, repr(line)
                assert match.group(1) == match.group(2) == "1", repr(
                    match.groups())

                line = lines.next()
                assert line == "-Subproject commit %s" % old_sha1, repr(line)

                line = lines.next()
                assert line == "+Subproject commit %s" % new_sha1, repr(line)

                new_file = diff.File(None,
                                     path,
                                     old_sha1,
                                     new_sha1,
                                     repository,
                                     old_mode=old_mode,
                                     new_mode=new_mode,
                                     chunks=[
                                         diff.Chunk(
                                             1,
                                             1,
                                             1,
                                             1,
                                             analysis="0=0:r18-58=18-58")
                                     ])

                if path not in files_by_path: addFile(new_file)

                old_mode = new_mode = None

                continue

            try:
                line = lines.next()

                delete_offset = 1
                delete_count = 0
                deleted_lines = []
                insert_offset = 1
                insert_count = 0
                inserted_lines = []

                if old_path and new_path and not simple:
                    old_lines = splitlines(repository.fetch(old_sha1).data)
                    new_lines = splitlines(repository.fetch(new_sha1).data)
                else:
                    old_lines = None
                    new_lines = None

                if path in files_by_path:
                    new_file = files_by_path[path]
                    if old_sha1 != '0' * 40:
                        assert new_file.old_sha1 == '0' * 40
                        new_file.old_sha1 = old_sha1
                        new_file.old_mode = old_mode
                    if new_sha1 != '0' * 40:
                        assert new_file.new_sha1 == '0' * 40
                        new_file.new_sha1 = new_sha1
                        new_file.new_mode = new_mode
                    new_file.chunks = []
                else:
                    new_file = diff.File(None,
                                         path,
                                         old_sha1,
                                         new_sha1,
                                         repository,
                                         old_mode=old_mode,
                                         new_mode=new_mode,
                                         chunks=[])

                old_mode = new_mode = None

                if selected_path is not None and selected_path == path:
                    selected_file = new_file

                if path not in files_by_path: addFile(new_file)

                previous_delete_offset = 1
                previous_insert_offset = 1

                while True:
                    match = re_chunk.match(line)

                    if not match: break

                    groups = match.groups()

                    delete_offset = int(groups[0])
                    deleted_lines = []

                    insert_offset = int(groups[1])
                    inserted_lines = []

                    while True:
                        line = lines.next()

                        if line == "\\ No newline at end of file": continue
                        if line[0] not in (' ', '-', '+'): break

                        if line[0] != ' ' and previous_delete_offset is not None and old_lines and new_lines and not simple:
                            detectWhiteSpaceChanges(files[-1], old_lines,
                                                    previous_delete_offset,
                                                    delete_offset, True,
                                                    new_lines,
                                                    previous_insert_offset,
                                                    insert_offset, True)
                            previous_delete_offset = None

                        if line[0] == ' ' and previous_delete_offset is None:
                            previous_delete_offset = delete_offset
                            previous_insert_offset = insert_offset

                        type = line[0]

                        if type == '-':
                            delete_offset += 1
                            deleted_lines.append(line[1:])
                        elif type == '+':
                            insert_offset += 1
                            inserted_lines.append(line[1:])
                        else:
                            if deleted_lines or inserted_lines:
                                chunks = createChunks(
                                    delete_offset - len(deleted_lines),
                                    deleted_lines,
                                    insert_offset - len(inserted_lines),
                                    inserted_lines)
                                files[-1].chunks.extend(chunks)
                                deleted_lines = []
                                inserted_lines = []

                            delete_offset += 1
                            insert_offset += 1

                    if deleted_lines or inserted_lines:
                        chunks = createChunks(
                            delete_offset - len(deleted_lines), deleted_lines,
                            insert_offset - len(inserted_lines),
                            inserted_lines)
                        files[-1].chunks.extend(chunks)
                        deleted_lines = []
                        inserted_lines = []

                if previous_delete_offset is not None and old_lines and new_lines and not simple:
                    detectWhiteSpaceChanges(files[-1], old_lines,
                                            previous_delete_offset,
                                            len(old_lines) + 1, True,
                                            new_lines, previous_insert_offset,
                                            len(new_lines) + 1, True)
                    previous_delete_offset = None
            except StopIteration:
                if deleted_lines or inserted_lines:
                    chunks = createChunks(delete_offset - len(deleted_lines),
                                          deleted_lines,
                                          insert_offset - len(inserted_lines),
                                          inserted_lines)
                    files[-1].chunks.extend(chunks)
                    deleted_lines = []
                    inserted_lines = []

                if previous_delete_offset is not None and old_lines and new_lines and not simple:
                    detectWhiteSpaceChanges(files[-1], old_lines,
                                            previous_delete_offset,
                                            len(old_lines) + 1, True,
                                            new_lines, previous_insert_offset,
                                            len(new_lines) + 1, True)

                raise
    except StopIteration:
        if old_mode is not None and new_mode is not None:
            assert names[0] == names[1]

            addFile(
                diff.File(None,
                          names[0],
                          None,
                          None,
                          repository,
                          old_mode=old_mode,
                          new_mode=new_mode,
                          chunks=[]))

    for path in (paths - included):
        lines = isplitlines(
            repository.run(command, '--full-index', '--unified=1', what, '--',
                           path))

        try:
            line = lines.next()

            while not line.startswith("index "):
                line = lines.next()

            try:
                sha1range, mode = line[6:].split(' ')
                if mode == "160000":
                    continue
                old_sha1, new_sha1 = sha1range.split("..")
            except:
                old_sha1, new_sha1 = line[6:].split(' ', 1)[0].split("..")

            if old_sha1 == '0' * 40 or new_sha1 == '0' * 40:
                # Added or removed empty file.
                continue

            addFile(
                diff.File(None,
                          path,
                          old_sha1,
                          new_sha1,
                          repository,
                          chunks=[]))

            old_data = repository.fetch(old_sha1).data
            old_lines = splitlines(old_data)
            new_data = repository.fetch(new_sha1).data
            new_lines = splitlines(new_data)

            assert len(old_lines) == len(new_lines), "%s:%d != %s:%d" % (
                old_sha1, len(old_lines), new_sha1, len(new_lines))

            def endsWithLinebreak(data):
                return data and data[-1] in "\n\r"

            detectWhiteSpaceChanges(files[-1], old_lines, 1,
                                    len(old_lines) + 1,
                                    endsWithLinebreak(old_data), new_lines, 1,
                                    len(new_lines) + 1,
                                    endsWithLinebreak(new_data))
        except StopIteration:
            pass

    if not simple:
        for file in files:
            mergeChunks(file)

    if from_commit and to_commit:
        if selected_path is not None:
            return selected_file
        else:
            return {from_commit.sha1: files}
    elif not commit.parents:
        return {None: files}
    else:
        return {commit.parents[0]: files}
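
Per the docstring, the result maps a parent SHA-1 to a list of diff.File
objects unless selected_path is given. A hedged sketch, with `parent` and
`child` commits assumed:

# Sketch only: diff two commits and walk the changed files.
result = parseDifferences(repository, from_commit=parent, to_commit=child)
for changed_file in result[parent.sha1]:
    print("%s: %d chunks" % (changed_file.path, len(changed_file.chunks)))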
Example #11
                if current_state == 'pending':
                    pending_files.add(file_id)
                reviewable_files.add(file_id)

        profiler.check("reviewfiles query")

        for changeset in changesets:
            all_files_local = all_files.copy()

            for file in changeset.files:
                if file.id in all_files_local:
                    all_files_local.remove(file.id)

            for file_id in all_files_local:
                if not file_ids or file_id in file_ids:
                    changeset.files.append(diff.File(file_id, dbutils.describe_file(db, file_id), None, None, repository))

            if review_filter == "pending":
                def isPending(file): return file.id in pending_files
                changeset.files = filter(isPending, changeset.files)

            elif review_filter == "reviewable":
                def isReviewable(file): return file.id in reviewable_files
                changeset.files = filter(isReviewable, changeset.files)

            elif review_filter == "relevant":
                filters = review_filters.Filters()
                filters.load(db, review=review, user=user)

                def isRelevant(file):
                    if file.id in reviewable_files: return True