示例#1
0
    def _apply_pygments(self, data, filename):
        """Syntax-highlight a file's contents using Pygments.

        Highlighting is skipped entirely when the file's extension is
        blacklisted, or when Pygments cannot determine a suitable lexer
        for the file.

        Args:
            data (unicode):
                The file content to highlight.

            filename (unicode):
                The name of the file, used to help pick a lexer.

        Returns:
            list of unicode:
            The syntax-highlighted lines, or ``None`` if highlighting
            was skipped.
        """
        if filename.endswith(self.STYLED_EXT_BLACKLIST):
            return None

        try:
            lexer = guess_lexer_for_filename(filename, data,
                                             stripnl=False,
                                             encoding='utf-8')
        except pygments.util.ClassNotFound:
            return None

        lexer.add_filter('codetagify')

        rendered = highlight(data, lexer, NoWrapperHtmlFormatter())

        return split_line_endings(rendered)
示例#2
0
    def __init__(self, data):
        """Initialize the parser with the diff content to process."""
        from reviewboard.diffviewer.diffutils import split_line_endings

        # Commit identifiers are unknown until parsing fills them in.
        self.base_commit_id = None
        self.new_commit_id = None

        self.data = data
        self.lines = split_line_endings(data)
示例#3
0
    def __init__(self, data):
        """Set up the parser state for the provided diff data."""
        from reviewboard.diffviewer.diffutils import split_line_endings

        # These stay None until the parser extracts commit information.
        self.base_commit_id = None
        self.new_commit_id = None

        self.data = data
        self.lines = split_line_endings(data)
    def _apply_pygments(self, data, filename):
        """Return a file's contents as Pygments-highlighted HTML lines.

        The lexer is chosen from the filename alone, and the rendered
        output is split into individual lines before being returned.
        """
        lexer = get_lexer_for_filename(filename,
                                       stripnl=False,
                                       encoding="utf-8")
        lexer.add_filter("codetagify")

        html = highlight(data, lexer, NoWrapperHtmlFormatter())

        return split_line_endings(html)
示例#5
0
    def _apply_pygments(self, data, filename):
        """Highlight a file's contents with Pygments.

        The highlighted HTML is returned as a list of lines.
        """
        lexer = get_lexer_for_filename(
            filename, stripnl=False, encoding='utf-8')
        lexer.add_filter('codetagify')

        markup = highlight(data, lexer, NoWrapperHtmlFormatter())

        return split_line_endings(markup)
示例#6
0
    def _find_range_info_v1(diff):
        """Return the changed-line ranges for each chunk in a diff.

        Each range covers the modified side of one chunk, with leading
        and trailing lines of context stripped off.

        Args:
            diff (bytes):
                The diff content to scan.

        Returns:
            list of tuple:
            A list of ``(start, end)`` line ranges.
        """
        # NOTE: The previous revision assigned `lines`, `process_changes`,
        # and `process_trailing_context` here but never used them; they
        # have been removed.
        ranges = []

        for range_info in get_diff_data_chunks_info(diff):
            orig_info = range_info['orig']
            modified_info = range_info['modified']

            orig_pre_lines_of_context = orig_info['pre_lines_of_context']
            orig_post_lines_of_context = orig_info['post_lines_of_context']
            modified_pre_lines_of_context = \
                modified_info['pre_lines_of_context']
            modified_post_lines_of_context = \
                modified_info['post_lines_of_context']

            # When both sides have leading context, the smaller amount
            # wins; otherwise take whichever side has any at all.
            if modified_pre_lines_of_context and orig_pre_lines_of_context:
                pre_lines_of_context = min(orig_pre_lines_of_context,
                                           modified_pre_lines_of_context)
            else:
                pre_lines_of_context = (modified_pre_lines_of_context
                                        or orig_pre_lines_of_context)

            if modified_post_lines_of_context and orig_post_lines_of_context:
                post_lines_of_context = min(orig_post_lines_of_context,
                                            modified_post_lines_of_context)
            else:
                post_lines_of_context = (modified_post_lines_of_context
                                         or orig_post_lines_of_context)

            start = modified_info['chunk_start'] + pre_lines_of_context

            # Compensate for the first line after the "@@" header being
            # counted both as context and as the chunk's start line.
            if pre_lines_of_context > 0:
                start -= 1

            length = (modified_info['chunk_len'] - pre_lines_of_context -
                      post_lines_of_context)

            ranges.append((start, start + length))

        return ranges
    def _apply_pygments(self, data, filename):
        """Applies Pygments syntax-highlighting to a file's contents.

        Syntax highlighting obeys an explicitly-provided list of
        preferences by extension, falling back to guessing a lexer from
        the contents of the file.

        The resulting HTML will be returned as a list of lines.
        """
        # NOTE(review): "stripln" looks like a typo for Pygments'
        # "stripnl" keyword (used by the fallback below) -- confirm
        # against _get_preferred_lexer()'s signature before changing.
        lexer = self._get_preferred_lexer(
            filename, stripln=False, encoding='utf-8')

        # Use lazy %-style arguments so the message is only formatted
        # when DEBUG logging is actually enabled.
        logger.debug('preferred lexer for %s: %s', filename, lexer)

        if not lexer:
            lexer = guess_lexer_for_filename(
                filename, data, stripnl=False,
                encoding='utf-8')

        lexer.add_filter('codetagify')

        return split_line_endings(highlight(data, lexer,
                                            NoWrapperHtmlFormatter()))
示例#8
0
    def _find_range_info(diff):
        """Return the changed-line ranges for each chunk in a diff.

        This scans the unified diff for "@@" chunk headers and, for each
        chunk, skips past its leading lines of context to find where the
        actual changes begin.

        Args:
            diff (bytes):
                The diff content to scan.

        Returns:
            list of tuple:
            A list of 0-based ``(start, end)`` line ranges.
        """
        lines = split_line_endings(diff)
        process_changes = False
        ranges = []

        # State for the chunk currently being processed.
        chunk_start = None
        chunk_len = 0
        lines_of_context = 0

        # Look through the chunks of the diff, trying to find the amount
        # of context shown at the beginning of each chunk. Though this
        # will usually be 3 lines, it may be fewer or more, depending
        # on file length and diff generation settings.
        for line in lines:
            if process_changes:
                if line.startswith((b'-', b'+')):
                    # We've found the first change in the chunk. We now
                    # know how many lines of context we have.
                    #
                    # We reduce the indexes by 1 because the chunk ranges
                    # in diffs start at 1, and we want a 0-based index.
                    start = chunk_start - 1 + lines_of_context
                    ranges.append((start, start + chunk_len))
                    process_changes = False
                    continue
                else:
                    lines_of_context += 1

            # This was not a change within a chunk, or we weren't processing,
            # so check to see if this is a chunk header instead.
            m = CHUNK_RANGE_RE.match(line)

            if m:
                # It is a chunk header. Reset the state for the next range,
                # and pull the line number and length from the header.
                chunk_start = int(m.group('new_start'))
                chunk_len = int(m.group('new_len') or '1')
                process_changes = True
                lines_of_context = 0

        return ranges
示例#9
0
    def __init__(self, data):
        """Initialize the parser.

        Args:
            data (bytes):
                The diff content to parse.

        Raises:
            TypeError:
                The provided ``data`` argument was not a ``bytes`` type.
        """
        from reviewboard.diffviewer.diffutils import split_line_endings

        if not isinstance(data, bytes):
            msg = _('%s expects bytes values for "data", not %s')
            raise TypeError(msg % (type(self).__name__, type(data)))

        # Commit identifiers stay None until parsing populates them.
        self.base_commit_id = None
        self.new_commit_id = None

        self.data = data
        self.lines = split_line_endings(data)
示例#10
0
    def _apply_pygments(self, data, filename):
        """Applies Pygments syntax-highlighting to a file's contents.

        A lexer from the explicitly-provided extension preferences is
        used when available; otherwise one is guessed from the contents
        of the file.

        The resulting HTML will be returned as a list of lines.
        """
        # NOTE(review): "stripln" looks like a typo for Pygments'
        # "stripnl" keyword (as used by the fallback below) -- confirm
        # against _get_preferred_lexer()'s signature before changing.
        lexer = self._get_preferred_lexer(filename,
                                          stripln=False,
                                          encoding='utf-8')

        # Lazy %-style arguments avoid building the message unless
        # DEBUG logging is enabled.
        logger.debug('preferred lexer for %s: %s', filename, lexer)

        if not lexer:
            lexer = guess_lexer_for_filename(filename,
                                             data,
                                             stripnl=False,
                                             encoding='utf-8')

        lexer.add_filter('codetagify')

        return split_line_endings(
            highlight(data, lexer, NoWrapperHtmlFormatter()))
示例#11
0
    def _find_range_info(diff):
        """Return the changed-line ranges for each chunk in a diff.

        This scans the unified diff for "@@" chunk headers and, for each
        chunk, trims both the leading and trailing lines of context so
        that each resulting range covers only the changed lines.

        Args:
            diff (bytes):
                The diff content to scan.

        Returns:
            list of tuple:
            A list of 0-based ``(start, end)`` line ranges.
        """
        lines = split_line_endings(diff)
        process_changes = False
        process_trailing_context = False
        ranges = []

        # State for the chunk currently being processed.
        chunk_start = None
        chunk_len = 0
        lines_of_context = 0

        # Look through the chunks of the diff, trying to find the amount
        # of context shown at the beginning of each chunk. Though this
        # will usually be 3 lines, it may be fewer or more, depending
        # on file length and diff generation settings.
        for line in lines:
            if process_changes:
                if line.startswith((b'-', b'+')):
                    # We've found the first change in the chunk. We now
                    # know how many lines of context we have.
                    #
                    # We reduce the indexes by 1 because the chunk ranges
                    # in diffs start at 1, and we want a 0-based index.
                    start = chunk_start - 1 + lines_of_context
                    chunk_len -= lines_of_context

                    # We then reduce by 1 again, compensating for the
                    # additional line of context, if any. We must do this
                    # because the first line after a "@@" section is being
                    # counted in lines_of_context, but is also the line
                    # referred to in chunk_start.
                    if lines_of_context > 0:
                        start -= 1

                    ranges.append((start, start + chunk_len))
                    process_changes = False
                    process_trailing_context = True
                    lines_of_context = 0
                    continue
                else:
                    lines_of_context += 1
            elif process_trailing_context:
                if line.startswith(b' '):
                    # This may be a line of context after the modifications
                    # in this chunk. Bump up the counter.
                    lines_of_context += 1
                    continue
                else:
                    # Reset the lines of trailing context, since we hit
                    # something other than an equal line.
                    lines_of_context = 0

            # This was not a change within a chunk, or we weren't processing,
            # so check to see if this is a chunk header instead.
            m = CHUNK_RANGE_RE.match(line)

            if m:
                # It is a chunk header. Start by updating the previous range
                # to factor in the lines of trailing context.
                if process_trailing_context and lines_of_context > 0:
                    last_range = ranges[-1]
                    ranges[-1] = (last_range[0],
                                  last_range[1] - lines_of_context)

                # Next, reset the state for the next range, and pull the line
                # number and length from the header.
                chunk_start = int(m.group('new_start'))
                chunk_len = int(m.group('new_len') or '1')
                process_changes = True
                process_trailing_context = False
                lines_of_context = 0

        # We need to adjust the last range, if we're still processing
        # trailing context.
        if process_trailing_context and lines_of_context > 0:
            last_range = ranges[-1]
            ranges[-1] = (last_range[0],
                          last_range[1] - lines_of_context)

        return ranges
示例#12
0
    def _find_range_info(diff):
        """Return the changed-line ranges for each chunk in a diff.

        This scans the unified diff for "@@" chunk headers and, for each
        chunk, trims both the leading and trailing lines of context so
        that each resulting range covers only the changed lines.

        Args:
            diff (bytes):
                The diff content to scan.

        Returns:
            list of tuple:
            A list of 0-based ``(start, end)`` line ranges.
        """
        lines = split_line_endings(diff)
        process_changes = False
        process_trailing_context = False
        ranges = []

        # State for the chunk currently being processed.
        chunk_start = None
        chunk_len = 0
        lines_of_context = 0

        # Look through the chunks of the diff, trying to find the amount
        # of context shown at the beginning of each chunk. Though this
        # will usually be 3 lines, it may be fewer or more, depending
        # on file length and diff generation settings.
        for line in lines:
            if process_changes:
                if line.startswith((b'-', b'+')):
                    # We've found the first change in the chunk. We now
                    # know how many lines of context we have.
                    #
                    # We reduce the indexes by 1 because the chunk ranges
                    # in diffs start at 1, and we want a 0-based index.
                    start = chunk_start - 1 + lines_of_context
                    chunk_len -= lines_of_context

                    # We then reduce by 1 again, compensating for the
                    # additional line of context, if any. We must do this
                    # because the first line after a "@@" section is being
                    # counted in lines_of_context, but is also the line
                    # referred to in chunk_start.
                    if lines_of_context > 0:
                        start -= 1

                    ranges.append((start, start + chunk_len))
                    process_changes = False
                    process_trailing_context = True
                    lines_of_context = 0
                    continue
                else:
                    lines_of_context += 1
            elif process_trailing_context:
                if line.startswith(b' '):
                    # This may be a line of context after the modifications
                    # in this chunk. Bump up the counter.
                    lines_of_context += 1
                    continue
                else:
                    # Reset the lines of trailing context, since we hit
                    # something other than an equal line.
                    lines_of_context = 0

            # This was not a change within a chunk, or we weren't processing,
            # so check to see if this is a chunk header instead.
            m = CHUNK_RANGE_RE.match(line)

            if m:
                # It is a chunk header. Start by updating the previous range
                # to factor in the lines of trailing context.
                if process_trailing_context and lines_of_context > 0:
                    last_range = ranges[-1]
                    ranges[-1] = (last_range[0],
                                  last_range[1] - lines_of_context)

                # Next, reset the state for the next range, and pull the line
                # number and length from the header.
                chunk_start = int(m.group('new_start'))
                chunk_len = int(m.group('new_len') or '1')
                process_changes = True
                process_trailing_context = False
                lines_of_context = 0

        # We need to adjust the last range, if we're still processing
        # trailing context.
        if process_trailing_context and lines_of_context > 0:
            last_range = ranges[-1]
            ranges[-1] = (last_range[0], last_range[1] - lines_of_context)

        return ranges