def check(conf, line):
    """Check that the first line of the buffer uses the configured newline.

    conf['type'] is 'dos' (expect \\r\\n) or anything else (expect \\n).
    Only the first line is inspected, since the whole buffer shares one
    newline style.
    """
    # Only run on the very first line, and only when a line break follows.
    if line.start != 0 or len(line.buffer) <= line.end:
        return

    if conf['type'] == 'dos':
        # DOS style: the two characters right before the break must be \r\n.
        if line.buffer[line.end - 1:line.end + 1] != '\r\n':
            yield LintProblem(1, line.end - line.start + 1,
                              'wrong new line character: expected \\r\\n')
    elif line.end > 0 and line.buffer[line.end - 1] == '\r':
        # UNIX style: a trailing \r means a DOS line break slipped in.
        yield LintProblem(1, line.end - line.start,
                          'wrong new line character: expected \\n')
def check(conf, token, prev, next, nextnext, context):
    """Check usage of and spacing inside flow sequences (``[...]``).

    Config keys used: 'forbid' (True / 'non-empty'), 'min-spaces-inside',
    'max-spaces-inside' and their '-empty' variants (-1 disables a limit,
    and -1 in an '-empty' variant falls back to the non-empty value).
    """
    if (conf['forbid'] is True and
            isinstance(token, yaml.FlowSequenceStartToken)):
        # Any flow sequence is forbidden.
        yield LintProblem(token.start_mark.line + 1,
                          token.end_mark.column + 1,
                          'forbidden flow sequence')
    elif (conf['forbid'] == 'non-empty' and
            isinstance(token, yaml.FlowSequenceStartToken) and
            not isinstance(next, yaml.FlowSequenceEndToken)):
        # Only non-empty flow sequences are forbidden; [] is still allowed.
        yield LintProblem(token.start_mark.line + 1,
                          token.end_mark.column + 1,
                          'forbidden flow sequence')
    elif (isinstance(token, yaml.FlowSequenceStartToken) and
            isinstance(next, yaml.FlowSequenceEndToken)):
        # Empty brackets: use the '-empty' limits when set, otherwise fall
        # back to the regular inside-spacing limits.
        problem = spaces_after(token, prev, next,
                               min=(conf['min-spaces-inside-empty']
                                    if conf['min-spaces-inside-empty'] != -1
                                    else conf['min-spaces-inside']),
                               max=(conf['max-spaces-inside-empty']
                                    if conf['max-spaces-inside-empty'] != -1
                                    else conf['max-spaces-inside']),
                               min_desc='too few spaces inside empty brackets',
                               max_desc=('too many spaces inside empty '
                                         'brackets'))
        if problem is not None:
            yield problem
    elif isinstance(token, yaml.FlowSequenceStartToken):
        # Spacing right after the opening '['.
        problem = spaces_after(token, prev, next,
                               min=conf['min-spaces-inside'],
                               max=conf['max-spaces-inside'],
                               min_desc='too few spaces inside brackets',
                               max_desc='too many spaces inside brackets')
        if problem is not None:
            yield problem
    elif (isinstance(token, yaml.FlowSequenceEndToken) and
            (prev is None or
             not isinstance(prev, yaml.FlowSequenceStartToken))):
        # Spacing right before the closing ']' (the empty case '[]' was
        # already handled when the opening token was seen).
        problem = spaces_before(token, prev, next,
                                min=conf['min-spaces-inside'],
                                max=conf['max-spaces-inside'],
                                min_desc='too few spaces inside brackets',
                                max_desc='too many spaces inside brackets')
        if problem is not None:
            yield problem
def check(conf, token, prev, next, nextnext, context):
    """Report implicit (empty) values in block and flow mappings.

    A ValueToken directly followed by a structural token (instead of an
    actual value) means the key has no value.
    """
    empty_block_value = (
        conf['forbid-in-block-mappings'] and
        isinstance(token, yaml.ValueToken) and
        isinstance(next, (yaml.KeyToken, yaml.BlockEndToken)))
    if empty_block_value:
        yield LintProblem(token.start_mark.line + 1,
                          token.end_mark.column + 1,
                          'empty value in block mapping')

    empty_flow_value = (
        conf['forbid-in-flow-mappings'] and
        isinstance(token, yaml.ValueToken) and
        isinstance(next, (yaml.FlowEntryToken, yaml.FlowMappingEndToken)))
    if empty_flow_value:
        yield LintProblem(token.start_mark.line + 1,
                          token.end_mark.column + 1,
                          'empty value in flow mapping')
def spaces_after(token, prev, next, min=-1, max=-1,
                 min_desc=None, max_desc=None):
    """Return a LintProblem if the space count between ``token`` and
    ``next`` (on the same line) violates ``min``/``max``; else None.

    A limit of -1 disables that bound.
    """
    if next is None or token.end_mark.line != next.start_mark.line:
        return None

    spaces = next.start_mark.pointer - token.end_mark.pointer
    if max != -1 and spaces > max:
        return LintProblem(token.start_mark.line + 1,
                           next.start_mark.column, max_desc)
    if min != -1 and spaces < min:
        return LintProblem(token.start_mark.line + 1,
                           next.start_mark.column + 1, min_desc)
    return None
def check(conf, token, prev, next, nextnext, context):
    """Require or forbid the '---' document start marker.

    conf['present'] is True to require it, False to forbid it.
    """
    if not conf['present']:
        # Marker forbidden: flag every '---' found.
        if isinstance(token, yaml.DocumentStartToken):
            yield LintProblem(token.start_mark.line + 1,
                              token.start_mark.column + 1,
                              'found forbidden document start "---"')
        return

    # Marker required: the first token after a document boundary must be
    # '---' (directives and the stream end are tolerated here).
    at_document_boundary = isinstance(prev, (yaml.StreamStartToken,
                                             yaml.DocumentEndToken,
                                             yaml.DirectiveToken))
    token_is_acceptable = isinstance(token, (yaml.DocumentStartToken,
                                             yaml.DirectiveToken,
                                             yaml.StreamEndToken))
    if at_document_boundary and not token_is_acceptable:
        yield LintProblem(token.start_mark.line + 1, 1,
                          'missing document start "---"')
def check(conf, token, prev, next, nextnext, context):
    """Check that comments between two tokens are indented like content.

    A comment is accepted when aligned with the content that follows it
    (current line indent) or, failing that, with the content before it.
    """
    if prev is None:
        return

    curr_line_indent = token.start_mark.column
    if isinstance(token, yaml.StreamEndToken):
        # End of stream: following "content" indent is column 0.
        curr_line_indent = 0

    skip_first_line = True
    if isinstance(prev, yaml.StreamStartToken):
        skip_first_line = False
        prev_line_indent = 0
    else:
        prev_line_indent = get_line_indent(prev)

    if prev_line_indent <= curr_line_indent:
        prev_line_indent = -1  # disable it

    for comment in get_comments_between_tokens(
            prev, token, skip_first_line=skip_first_line):
        if comment.column - 1 == curr_line_indent:
            # Once a comment matches the following content, later comments
            # in the run must match it too.
            prev_line_indent = -1  # disable it
        elif comment.column - 1 != prev_line_indent:
            yield LintProblem(comment.line, comment.column,
                              'comment not indented like content')
def check(conf, line):
    """Flag lines longer than ``conf['max']`` characters.

    A line made of a single non-breakable token (URL, hash, ...) can be
    tolerated with 'allow-non-breakable-words';
    'allow-non-breakable-inline-mappings' extends the tolerance to inline
    mappings whose value is non-breakable (implies the former).
    """
    if line.end - line.start <= conf['max']:
        return

    # FIX: compute the effective flag locally. The original did
    # `conf['allow-non-breakable-words'] |= conf['allow-non-breakable-
    # inline-mappings']`, permanently mutating the shared config dict as a
    # side effect of running the check.
    allow_non_breakable_words = (conf['allow-non-breakable-words'] or
                                 conf['allow-non-breakable-inline-mappings'])

    if allow_non_breakable_words:
        # Skip leading spaces.
        start = line.start
        while start < line.end and line.buffer[start] == ' ':
            start += 1

        if start != line.end:
            # Skip a leading comment marker ('# ', possibly several '#')
            # or a list marker ('- ').
            if line.buffer[start] == '#':
                while line.buffer[start] == '#':
                    start += 1
                start += 1
            elif line.buffer[start] == '-':
                start += 2

            # No breakable space in the rest of the line: tolerated.
            if line.buffer.find(' ', start, line.end) == -1:
                return

        if (conf['allow-non-breakable-inline-mappings'] and
                check_inline_mapping(line)):
            return

    yield LintProblem(
        line.line_no, conf['max'] + 1,
        'line too long (%d > %d characters)' %
        (line.end - line.start, conf['max']))
def check(conf, token, prev, next, nextnext, context):
    """Detect duplicated keys inside the same mapping.

    A stack of currently open mappings/sequences is kept in
    ``context['stack']``; every scalar key seen in the innermost mapping
    is recorded on its stack entry and compared against newcomers.
    """
    if 'stack' not in context:
        context['stack'] = []
    if isinstance(token, (yaml.BlockMappingStartToken,
                          yaml.FlowMappingStartToken)):
        context['stack'].append(Parent(MAP))
    elif isinstance(
            token,
            (yaml.BlockSequenceStartToken, yaml.FlowSequenceStartToken)):
        context['stack'].append(Parent(SEQ))
    elif isinstance(token, (yaml.BlockEndToken,
                            yaml.FlowMappingEndToken,
                            yaml.FlowSequenceEndToken)):
        context['stack'].pop()
    elif (isinstance(token, yaml.KeyToken) and
          isinstance(next, yaml.ScalarToken)):
        # This check is done because KeyTokens can be found inside flow
        # sequences... strange, but allowed.
        if len(context['stack']) > 0 and context['stack'][-1].type == MAP:
            if next.value in context['stack'][-1].keys:
                yield LintProblem(
                    next.start_mark.line + 1, next.start_mark.column + 1,
                    'duplication of key "%s" in mapping' % next.value)
            else:
                context['stack'][-1].keys.append(next.value)
def check(conf, line):
    """Check the number of consecutive blank lines.

    Uses conf['max'] in general, conf['max-start'] at the beginning of the
    document and conf['max-end'] at its end.
    """
    if line.start == line.end and line.end < len(line.buffer):
        # Only alert on the last blank line of a series
        if (line.end < len(line.buffer) - 1 and
                line.buffer[line.end + 1] == '\n'):
            return

        blank_lines = 0

        # Count the '\n' of the preceding blank lines.
        while (line.start > blank_lines and
               line.buffer[line.start - blank_lines - 1] == '\n'):
            blank_lines += 1

        max = conf['max']

        # Special case: start of document
        if line.start - blank_lines == 0:
            blank_lines += 1  # first line doesn't have a preceding \n
            max = conf['max-start']

        # Special case: end of document
        # NOTE: The last line of a file is always supposed to end with a new
        # line. See POSIX definition of a line at:
        if line.end == len(line.buffer) - 1 and line.buffer[line.end] == '\n':
            # Allow the exception of the one-byte file containing '\n'
            if line.end == 0:
                return

            max = conf['max-end']

        if blank_lines > max:
            yield LintProblem(line.line_no, 1,
                              'too many blank lines (%d > %d)'
                              % (blank_lines, max))
def check(conf, token, prev, next, nextnext, context):
    """Flag string scalars whose quoting does not match 'quote-type'.

    'quote-type' is 'single', 'double' or 'any' (any quoting accepted, but
    quotes are required).
    """
    quote_type = conf['quote-type']

    # Only scalars in value position (after ':' or after a tag) are checked.
    if (isinstance(token, yaml.tokens.ScalarToken) and
            isinstance(prev, (yaml.ValueToken, yaml.TagToken))):
        # Ignore explicit types, e.g. !!str testtest or !!int 42
        if (prev and isinstance(prev, yaml.tokens.TagToken) and
                prev.value[0] == '!!'):
            return

        # Ignore numbers, booleans, etc.
        resolver = yaml.resolver.Resolver()
        if resolver.resolve(yaml.nodes.ScalarNode, token.value,
                            (True, False)) != 'tag:yaml.org,2002:str':
            return

        # Ignore multi-line strings
        if (not token.plain) and (token.style == "|" or token.style == ">"):
            return

        if ((quote_type == 'single' and token.style != "'") or
                (quote_type == 'double' and token.style != '"') or
                (quote_type == 'any' and token.style is None)):
            yield LintProblem(
                token.start_mark.line + 1,
                token.start_mark.column + 1,
                "string value is not quoted with %s quotes" % (quote_type))
def check(conf, token, prev, next, nextnext, context):
    """Check spacing around ',' in flow sequences and mappings."""
    if not isinstance(token, yaml.FlowEntryToken):
        return

    comma_starts_line = (prev is not None and
                         conf['max-spaces-before'] != -1 and
                         prev.end_mark.line < token.start_mark.line)
    if comma_starts_line:
        # The comma sits alone at the start of a line: everything before
        # it (including the line break) counts as spaces before the comma.
        yield LintProblem(token.start_mark.line + 1,
                          max(1, token.start_mark.column),
                          'too many spaces before comma')
    else:
        problem = spaces_before(token, prev, next,
                                max=conf['max-spaces-before'],
                                max_desc='too many spaces before comma')
        if problem is not None:
            yield problem

    problem = spaces_after(token, prev, next,
                           min=conf['min-spaces-after'],
                           max=conf['max-spaces-after'],
                           min_desc='too few spaces after comma',
                           max_desc='too many spaces after comma')
    if problem is not None:
        yield problem
def check(conf, comment):
    """Check spacing of a comment.

    'min-spaces-from-content' (-1 disables) is the minimum gap between an
    inline comment and the content before it; 'require-starting-space'
    requires a space right after the leading '#' run.
    """
    if (conf['min-spaces-from-content'] != -1 and comment.is_inline() and
            comment.pointer - comment.token_before.end_mark.pointer <
            conf['min-spaces-from-content']):
        yield LintProblem(comment.line_no, comment.column_no,
                          'too few spaces before comment')

    if conf['require-starting-space']:
        text_start = comment.pointer + 1
        # FIX: test the bound BEFORE indexing. The original evaluated
        # `comment.buffer[text_start] == '#'` first, raising IndexError
        # when a run of '#' extends to the very end of the buffer.
        while (text_start < len(comment.buffer) and
               comment.buffer[text_start] == '#'):
            text_start += 1
        if (text_start < len(comment.buffer) and
                comment.buffer[text_start] not in (' ', '\n', '\0')):
            yield LintProblem(comment.line_no,
                              comment.column_no + text_start - comment.pointer,
                              'missing starting space in comment')
def check(conf, token, prev, next, nextnext, context):
    """Run the indentation checker, converting internal assertion failures
    into a lint problem instead of crashing on unexpected token streams."""
    try:
        yield from _check(conf, token, prev, next, nextnext, context)
    except AssertionError:
        yield LintProblem(token.start_mark.line + 1,
                          token.start_mark.column + 1,
                          'cannot infer indentation: unexpected token')
def check(conf, token, prev, next, nextnext, context):
    """Flag plain (unquoted) scalars whose value looks like a boolean."""
    if prev and isinstance(prev, yaml.tokens.TagToken):
        # An explicit tag means the author chose the type on purpose.
        return
    if not isinstance(token, yaml.tokens.ScalarToken):
        return
    if token.value in TRUTHY and token.style is None:
        yield LintProblem(token.start_mark.line + 1,
                          token.start_mark.column + 1,
                          "truthy value is not quoted")
def spaces_before(token, prev, next, min=-1, max=-1,
                  min_desc=None, max_desc=None):
    """Return a LintProblem if the space count between ``prev`` and
    ``token`` (on the same line) violates ``min``/``max``; else None.

    A limit of -1 disables that bound.
    """
    if prev is None or prev.end_mark.line != token.start_mark.line:
        return None
    # Discard tokens (only scalars?) that end at the start of next line
    if (prev.end_mark.pointer != 0 and
            prev.end_mark.buffer[prev.end_mark.pointer - 1] == '\n'):
        return None

    spaces = token.start_mark.pointer - prev.end_mark.pointer
    if max != -1 and spaces > max:
        return LintProblem(token.start_mark.line + 1,
                           token.start_mark.column, max_desc)
    if min != -1 and spaces < min:
        return LintProblem(token.start_mark.line + 1,
                           token.start_mark.column + 1, min_desc)
    return None
def check(conf, token, prev, next, nextnext, context):
    """Check spacing of comments located between ``token`` and ``next``.

    'min-spaces-from-content' (-1 disables) enforces a minimum gap between
    an inline comment and preceding content; 'require-starting-space'
    requires a space after the '#'.
    """
    for comment in get_comments_between_tokens(token, next):
        if (conf['min-spaces-from-content'] != -1 and
                not isinstance(token, yaml.StreamStartToken) and
                comment.line == token.end_mark.line + 1):
            # Sometimes token end marks are on the next line
            if token.end_mark.buffer[token.end_mark.pointer - 1] != '\n':
                if (comment.pointer - token.end_mark.pointer <
                        conf['min-spaces-from-content']):
                    yield LintProblem(comment.line, comment.column,
                                      'too few spaces before comment')

        if (conf['require-starting-space'] and
                comment.pointer + 1 < len(comment.buffer) and
                comment.buffer[comment.pointer + 1] != ' ' and
                comment.buffer[comment.pointer + 1] != '\n'):
            yield LintProblem(comment.line, comment.column + 1,
                              'missing starting space in comment')
def check(conf, token, prev, next, nextnext, context):
    """Check quoting of string scalars against the configuration.

    'required' is True (quotes mandatory), False (optional but must match
    'quote-type' when present) or any other value, meaning quotes are only
    allowed when actually needed.
    """
    if not (isinstance(token, yaml.tokens.ScalarToken) and
            isinstance(prev, (yaml.ValueToken, yaml.TagToken))):
        return

    # Ignore explicit types, e.g. !!str testtest or !!int 42
    if (prev and isinstance(prev, yaml.tokens.TagToken) and
            prev.value[0] == '!!'):
        return

    # Ignore numbers, booleans, etc.
    resolver = yaml.resolver.Resolver()
    tag = resolver.resolve(yaml.nodes.ScalarNode, token.value, (True, False))
    if token.plain and tag != DEFAULT_SCALAR_TAG:
        return

    # Ignore multi-line strings
    if (not token.plain) and (token.style == "|" or token.style == ">"):
        return

    quote_type = conf['quote-type']
    required = conf['required']

    # Completely relaxed about quotes (same as the rule being disabled)
    if required is False and quote_type == 'any':
        return

    msg = None
    if required is True:
        # Quotes are mandatory and need to match config
        if token.style is None or not quote_match(quote_type, token.style):
            msg = "string value is not quoted with %s quotes" % (quote_type)

    elif required is False:
        # Quotes are not mandatory but when used need to match config
        if token.style and not quote_match(quote_type, token.style):
            msg = "string value is not quoted with %s quotes" % (quote_type)

    elif not token.plain:
        # Quotes are disallowed when not needed
        if (tag == DEFAULT_SCALAR_TAG and token.value and
                token.value[0] not in START_TOKENS):
            msg = "string value is redundantly quoted with %s quotes" % (
                quote_type)

        # But when used need to match config
        elif token.style and not quote_match(quote_type, token.style):
            msg = "string value is not quoted with %s quotes" % (quote_type)

    if msg is not None:
        yield LintProblem(
            token.start_mark.line + 1,
            token.start_mark.column + 1,
            msg)
def check(conf, token, prev, next, nextnext, context):
    """Flag plain scalars that look truthy but are not in 'allowed-values'."""
    if prev and isinstance(prev, yaml.tokens.TagToken):
        # Explicitly tagged values are deliberate; leave them alone.
        return
    if not isinstance(token, yaml.tokens.ScalarToken):
        return

    disallowed = set(TRUTHY) - set(conf['allowed-values'])
    if token.value in disallowed and token.style is None:
        yield LintProblem(token.start_mark.line + 1,
                          token.start_mark.column + 1,
                          "truthy value should be one of [" +
                          ", ".join(sorted(conf['allowed-values'])) + "]")
def check(conf, token, prev, next, nextnext, context):
    """Require or forbid the '...' document end marker.

    conf['present'] is True to require it, False to forbid it.
    """
    if conf['present']:
        prev_is_end_or_stream_start = isinstance(
            prev, (yaml.DocumentEndToken, yaml.StreamStartToken))

        if isinstance(token, yaml.StreamEndToken):
            # Last document of the stream: problem reported on its last
            # line (no +1 here, the stream-end mark is one line past it).
            if not prev_is_end_or_stream_start:
                yield LintProblem(token.start_mark.line, 1,
                                  'missing document end "..."')
        elif isinstance(token, yaml.DocumentStartToken):
            if not prev_is_end_or_stream_start:
                yield LintProblem(token.start_mark.line + 1, 1,
                                  'missing document end "..."')
    elif isinstance(token, yaml.DocumentEndToken):
        yield LintProblem(token.start_mark.line + 1,
                          token.start_mark.column + 1,
                          'found forbidden document end "..."')
def check(conf, token, prev, next, nextnext, context):
    """Forbid implicit (0777) and/or explicit (0o777) octal integers."""
    if prev and isinstance(prev, yaml.tokens.TagToken):
        # Explicitly tagged: the author chose the type on purpose.
        return

    if conf['forbid-implicit-octal']:
        if isinstance(token, yaml.tokens.ScalarToken) and not token.style:
            val = token.value
            # Plain all-digit scalar with a leading zero, e.g. 0777.
            if val.isdigit() and len(val) > 1 and val[0] == '0':
                yield LintProblem(
                    token.start_mark.line + 1, token.end_mark.column + 1,
                    'forbidden implicit octal value "%s"' % token.value)

    if conf['forbid-explicit-octal']:
        if isinstance(token, yaml.tokens.ScalarToken) and not token.style:
            val = token.value
            # Plain scalar of the form 0o777.
            if len(val) > 2 and val[:2] == '0o' and val[2:].isdigit():
                yield LintProblem(
                    token.start_mark.line + 1, token.end_mark.column + 1,
                    'forbidden explicit octal value "%s"' % token.value)
def check(conf, comment):
    """Check comment spacing, optionally exempting shebang-like lines.

    When 'ignore-shebangs' is set, a comment at line 1, column 1 of the
    form '#!...' is not required to have a starting space.
    """
    if (conf['min-spaces-from-content'] != -1 and comment.is_inline() and
            comment.pointer - comment.token_before.end_mark.pointer <
            conf['min-spaces-from-content']):
        yield LintProblem(comment.line_no, comment.column_no,
                          'too few spaces before comment')

    if conf['require-starting-space']:
        text_start = comment.pointer + 1
        # FIX: test the bound BEFORE indexing. The original evaluated
        # `comment.buffer[text_start] == '#'` first, raising IndexError
        # when a run of '#' extends to the very end of the buffer.
        while (text_start < len(comment.buffer) and
               comment.buffer[text_start] == '#'):
            text_start += 1
        if text_start < len(comment.buffer):
            if (conf['ignore-shebangs'] and
                    comment.line_no == 1 and
                    comment.column_no == 1 and
                    re.match(r'^!\S', comment.buffer[text_start:])):
                return
            elif comment.buffer[text_start] not in (' ', '\n', '\0'):
                column = comment.column_no + text_start - comment.pointer
                yield LintProblem(comment.line_no,
                                  column,
                                  'missing starting space in comment')
def check(conf, comment):
    """Check comment spacing (variant accepting '\\r' line endings).

    'ignore-shebangs' exempts a '#!' comment at line 1, column 1.
    """
    if (conf['min-spaces-from-content'] != -1 and comment.is_inline() and
            comment.pointer - comment.token_before.end_mark.pointer <
            conf['min-spaces-from-content']):
        yield LintProblem(comment.line_no, comment.column_no,
                          'too few spaces before comment')

    if conf['require-starting-space']:
        text_start = comment.pointer + 1
        # FIX: test the bound BEFORE indexing. The original evaluated
        # `comment.buffer[text_start] == '#'` first, raising IndexError
        # when a run of '#' extends to the very end of the buffer.
        while (text_start < len(comment.buffer) and
               comment.buffer[text_start] == '#'):
            text_start += 1
        if text_start < len(comment.buffer):
            if (conf['ignore-shebangs'] and
                    comment.line_no == 1 and
                    comment.column_no == 1 and
                    comment.buffer[text_start] == '!'):
                return
            # We can test for both \r and \r\n just by checking first char
            # \r itself is a valid newline on some older OS.
            elif comment.buffer[text_start] not in {' ', '\n', '\r', '\x00'}:
                column = comment.column_no + text_start - comment.pointer
                yield LintProblem(comment.line_no,
                                  column,
                                  'missing starting space in comment')
def check(conf, line):
    """Flag lines longer than ``conf['max']`` characters.

    With 'allow-non-breakable-words', a line holding one single
    non-breakable token (after optional indent and a '# ' or '- ' marker)
    is tolerated.
    """
    length = line.end - line.start
    if length <= conf['max']:
        return

    if conf['allow-non-breakable-words']:
        # Locate the first non-space character.
        pos = line.start
        while pos < line.end and line.buffer[pos] == ' ':
            pos += 1

        if pos != line.end:
            # Skip a leading '# ' or '- ' marker.
            if line.buffer[pos] in ('#', '-'):
                pos += 2

            # No breakable space left: single long word, tolerated.
            if line.buffer.find(' ', pos, line.end) == -1:
                return

    yield LintProblem(
        line.line_no, conf['max'] + 1,
        'line too long (%d > %d characters)' % (length, conf['max']))
def check(conf, comment):
    """Check that a block comment is indented like the content around it.

    A comment is accepted when aligned with the content that follows it,
    or with the content before it (and with the previous comment of the
    same run, if any).
    """
    # Only check block comments
    if (not isinstance(comment.token_before, yaml.StreamStartToken) and
            comment.token_before.end_mark.line + 1 == comment.line_no):
        return

    next_line_indent = comment.token_after.start_mark.column
    if isinstance(comment.token_after, yaml.StreamEndToken):
        next_line_indent = 0

    if isinstance(comment.token_before, yaml.StreamStartToken):
        prev_line_indent = 0
    else:
        prev_line_indent = get_line_indent(comment.token_before)

    # In the following case only the next line indent is valid:
    #     list:
    #         # comment
    #         - 1
    #         - 2
    if prev_line_indent <= next_line_indent:
        prev_line_indent = next_line_indent

    # If two indents are valid but a previous comment went back to normal
    # indent, for the next ones to do the same. In other words, avoid this:
    #     list:
    #         - 1
    #     # comment on valid indent (0)
    #         # comment on valid indent (4)
    #     other-list:
    #         - 2
    if (comment.comment_before is not None and
            not comment.comment_before.is_inline()):
        prev_line_indent = comment.comment_before.column_no - 1

    if (comment.column_no - 1 != prev_line_indent and
            comment.column_no - 1 != next_line_indent):
        yield LintProblem(comment.line_no, comment.column_no,
                          'comment not indented like content')
def check(conf, token, prev, next, nextnext, context):
    """Flag string scalars in value position whose quoting style does not
    match conf['quote-type'] ('single', 'double' or 'any')."""
    quote_type = conf['quote-type']

    if prev and isinstance(prev, yaml.tokens.TagToken):
        # we ignore generic strings, e.g. somestring: !!str testtest
        if prev.value[1] != "str":
            return

    if not isinstance(token, yaml.tokens.ScalarToken):
        return
    if not (isinstance(prev, yaml.tokens.ValueToken) or
            isinstance(prev, yaml.tokens.TagToken)):
        return

    # we ignore multi-line strings
    if not token.plain and token.style in ("|", ">"):
        return

    badly_quoted = (
        (quote_type == 'single' and token.style != "'") or
        (quote_type == 'double' and token.style != '"') or
        (quote_type == 'any' and token.style is None))
    if badly_quoted:
        yield LintProblem(
            token.start_mark.line + 1,
            token.start_mark.column + 1,
            "string value is not quoted with %s quotes" % (quote_type)
        )
def check(conf, line):
    """Require the file to end with a newline character."""
    reaches_buffer_end = line.end == len(line.buffer)
    if reaches_buffer_end and line.end > line.start:
        # The buffer stops in the middle of a non-empty line: the final
        # '\n' is missing.
        yield LintProblem(line.line_no, line.end - line.start + 1,
                          'no new line character at the end of file')
def check_scalar_indentation(conf, token, context):
    """Check indentation of the continuation lines of a multi-line scalar.

    The expected indent depends on the scalar style and on the innermost
    parent on the indentation stack (``context['stack']``).
    """
    if token.start_mark.line == token.end_mark.line:
        return

    if token.plain:
        expected_indent = token.start_mark.column
    elif token.style in ('"', "'"):
        # Continuation lines of quoted scalars line up after the quote.
        expected_indent = token.start_mark.column + 1
    elif token.style in ('>', '|'):
        if context['stack'][-1].type == B_SEQ:
            # - >
            #     multi
            #     line
            expected_indent = token.start_mark.column + conf['spaces']
        elif context['stack'][-1].type == KEY:
            assert context['stack'][-1].explicit_key
            # - ? >
            #       multi-line
            #       key
            #   : >
            #       multi-line
            #       value
            expected_indent = token.start_mark.column + conf['spaces']
        elif context['stack'][-1].type == VAL:
            if token.start_mark.line + 1 > context['cur_line']:
                # - key:
                #     >
                #       multi
                #       line
                expected_indent = context['stack'][-1].indent + conf['spaces']
            elif context['stack'][-2].explicit_key:
                # - ? key
                #   : >
                #       multi-line
                #       value
                expected_indent = token.start_mark.column + conf['spaces']
            else:
                # - key: >
                #     multi
                #     line
                expected_indent = context['stack'][-2].indent + conf['spaces']
        else:
            expected_indent = context['stack'][-1].indent + conf['spaces']

    line_no = token.start_mark.line + 1

    # Walk each physical line inside the scalar and measure its indent.
    line_start = token.start_mark.pointer
    while True:
        line_start = token.start_mark.buffer.find(
            '\n', line_start, token.end_mark.pointer - 1) + 1
        if line_start == 0:
            break
        line_no += 1

        indent = 0
        while token.start_mark.buffer[line_start + indent] == ' ':
            indent += 1
        # Blank lines inside the scalar are not checked.
        if token.start_mark.buffer[line_start + indent] == '\n':
            continue

        if indent != expected_indent:
            yield LintProblem(line_no, indent + 1,
                              'wrong indentation: expected %d but found %d' %
                              (expected_indent, indent))
def check(conf, token, prev, next, nextnext, context):
    """Check quoting of string scalars against the configuration.

    'required' is True (quotes mandatory), False (optional), or
    'only-when-needed' (quotes forbidden unless actually required);
    'extra-required'/'extra-allowed' are regex lists refining the rule.
    """
    if not (isinstance(token, yaml.tokens.ScalarToken) and
            isinstance(prev, (yaml.BlockEntryToken, yaml.FlowEntryToken,
                              yaml.FlowSequenceStartToken, yaml.TagToken,
                              yaml.ValueToken))):
        return

    # Ignore explicit types, e.g. !!str testtest or !!int 42
    if (prev and isinstance(prev, yaml.tokens.TagToken) and
            prev.value[0] == '!!'):
        return

    # Ignore numbers, booleans, etc.
    resolver = yaml.resolver.Resolver()
    tag = resolver.resolve(yaml.nodes.ScalarNode, token.value, (True, False))
    if token.plain and tag != DEFAULT_SCALAR_TAG:
        return

    # Ignore multi-line strings
    if (not token.plain) and (token.style == "|" or token.style == ">"):
        return

    quote_type = conf['quote-type']

    msg = None
    if conf['required'] is True:
        # Quotes are mandatory and need to match config
        if token.style is None or not _quote_match(quote_type, token.style):
            msg = "string value is not quoted with %s quotes" % quote_type

    elif conf['required'] is False:
        # Quotes are not mandatory but when used need to match config
        if token.style and not _quote_match(quote_type, token.style):
            msg = "string value is not quoted with %s quotes" % quote_type
        elif not token.style:
            is_extra_required = any(re.search(r, token.value)
                                    for r in conf['extra-required'])
            if is_extra_required:
                msg = "string value is not quoted"

    elif conf['required'] == 'only-when-needed':
        # Quotes are not strictly needed here
        if (token.style and tag == DEFAULT_SCALAR_TAG and token.value and
                not _quotes_are_needed(token.value)):
            is_extra_required = any(re.search(r, token.value)
                                    for r in conf['extra-required'])
            is_extra_allowed = any(re.search(r, token.value)
                                   for r in conf['extra-allowed'])
            if not (is_extra_required or is_extra_allowed):
                msg = "string value is redundantly quoted with %s quotes" % (
                    quote_type)

        # But when used need to match config
        elif token.style and not _quote_match(quote_type, token.style):
            msg = "string value is not quoted with %s quotes" % quote_type

        elif not token.style:
            is_extra_required = len(conf['extra-required']) and any(
                re.search(r, token.value) for r in conf['extra-required'])
            if is_extra_required:
                msg = "string value is not quoted"

    if msg is not None:
        yield LintProblem(
            token.start_mark.line + 1,
            token.start_mark.column + 1,
            msg)
def check_scalar_indentation(conf, token, context):
    """Check indentation of the continuation lines of a multi-line scalar.

    Unlike the fixed-``spaces`` variant, the expected indent is computed
    lazily from the first continuation line, so a 'consistent' spaces
    setting can be learned from the document itself.
    """
    if token.start_mark.line == token.end_mark.line:
        return

    def compute_expected_indent(found_indent):
        def detect_indent(base_indent):
            # Learn the 'consistent' indent width from the first sample.
            if not isinstance(context['spaces'], int):
                context['spaces'] = found_indent - base_indent
            return base_indent + context['spaces']

        if token.plain:
            return token.start_mark.column
        elif token.style in ('"', "'"):
            # Continuation lines of quoted scalars line up after the quote.
            return token.start_mark.column + 1
        elif token.style in ('>', '|'):
            if context['stack'][-1].type == B_ENT:
                # - >
                #     multi
                #     line
                return detect_indent(token.start_mark.column)
            elif context['stack'][-1].type == KEY:
                assert context['stack'][-1].explicit_key
                # - ? >
                #       multi-line
                #       key
                #   : >
                #       multi-line
                #       value
                return detect_indent(token.start_mark.column)
            elif context['stack'][-1].type == VAL:
                if token.start_mark.line + 1 > context['cur_line']:
                    # - key:
                    #     >
                    #       multi
                    #       line
                    return detect_indent(context['stack'][-1].indent)
                elif context['stack'][-2].explicit_key:
                    # - ? key
                    #   : >
                    #       multi-line
                    #       value
                    return detect_indent(token.start_mark.column)
                else:
                    # - key: >
                    #     multi
                    #     line
                    return detect_indent(context['stack'][-2].indent)
            else:
                return detect_indent(context['stack'][-1].indent)

    expected_indent = None

    line_no = token.start_mark.line + 1

    # Walk each physical line inside the scalar and measure its indent.
    line_start = token.start_mark.pointer
    while True:
        line_start = token.start_mark.buffer.find(
            '\n', line_start, token.end_mark.pointer - 1) + 1
        if line_start == 0:
            break
        line_no += 1

        indent = 0
        while token.start_mark.buffer[line_start + indent] == ' ':
            indent += 1
        # Blank lines inside the scalar are not checked.
        if token.start_mark.buffer[line_start + indent] == '\n':
            continue

        if expected_indent is None:
            expected_indent = compute_expected_indent(indent)

        if indent != expected_indent:
            yield LintProblem(line_no, indent + 1,
                              'wrong indentation: expected %d but found %d' %
                              (expected_indent, indent))
def _check(conf, token, prev, next, nextnext, context):
    """Core indentation check.

    Step 1 lints the current token against the expected indent derived
    from the stack of open parents (``context['stack']``); Step 2 updates
    that stack and the current-line bookkeeping for the next token.
    """
    if 'stack' not in context:
        context['stack'] = [Parent(ROOT, 0)]
        context['cur_line'] = -1
        context['spaces'] = conf['spaces']
        context['indent-sequences'] = conf['indent-sequences']

    # Step 1: Lint

    is_visible = (
        not isinstance(token, (yaml.StreamStartToken, yaml.StreamEndToken)) and
        not isinstance(token, yaml.BlockEndToken) and
        not (isinstance(token, yaml.ScalarToken) and token.value == ''))
    first_in_line = (is_visible and
                     token.start_mark.line + 1 > context['cur_line'])

    def detect_indent(base_indent, next):
        # Learn the 'consistent' indent width from the first sample seen.
        if not isinstance(context['spaces'], int):
            context['spaces'] = next.start_mark.column - base_indent
        return base_indent + context['spaces']

    if first_in_line:
        found_indentation = token.start_mark.column
        expected = context['stack'][-1].indent

        if isinstance(token, (yaml.FlowMappingEndToken,
                              yaml.FlowSequenceEndToken)):
            # Closing '}' / ']' align with the line that opened them.
            expected = context['stack'][-1].line_indent
        elif (context['stack'][-1].type == KEY and
                context['stack'][-1].explicit_key and
                not isinstance(token, yaml.ValueToken)):
            expected = detect_indent(expected, token)

        if found_indentation != expected:
            yield LintProblem(token.start_mark.line + 1, found_indentation + 1,
                              'wrong indentation: expected %d but found %d' %
                              (expected, found_indentation))

    if (isinstance(token, yaml.ScalarToken) and
            conf['check-multi-line-strings']):
        for problem in check_scalar_indentation(conf, token, context):
            yield problem

    # Step 2.a: bookkeeping of the current line

    if is_visible:
        context['cur_line'] = get_real_end_line(token)
        if first_in_line:
            context['cur_line_indent'] = found_indentation

    # Step 2.b: Update state

    if isinstance(token, yaml.BlockMappingStartToken):
        #   - a: 1
        # or
        #   - ? a
        #     : 1
        # or
        #   - ?
        #       a
        #     : 1
        assert isinstance(next, yaml.KeyToken)
        assert next.start_mark.line == token.start_mark.line

        indent = token.start_mark.column

        context['stack'].append(Parent(B_MAP, indent))

    elif isinstance(token, yaml.FlowMappingStartToken):
        if next.start_mark.line == token.start_mark.line:
            #   - {a: 1, b: 2}
            indent = next.start_mark.column
        else:
            #   - {
            #     a: 1, b: 2
            #   }
            indent = detect_indent(context['cur_line_indent'], next)

        context['stack'].append(Parent(F_MAP, indent,
                                       line_indent=context['cur_line_indent']))

    elif isinstance(token, yaml.BlockSequenceStartToken):
        #   - - a
        #     - b
        assert isinstance(next, yaml.BlockEntryToken)
        assert next.start_mark.line == token.start_mark.line

        indent = token.start_mark.column

        context['stack'].append(Parent(B_SEQ, indent))

    elif (isinstance(token, yaml.BlockEntryToken) and
            # in case of an empty entry
            not isinstance(next, (yaml.BlockEntryToken, yaml.BlockEndToken))):
        # It looks like pyyaml doesn't issue BlockSequenceStartTokens when the
        # list is not indented. We need to compensate that.
        if context['stack'][-1].type != B_SEQ:
            context['stack'].append(Parent(B_SEQ, token.start_mark.column))
            context['stack'][-1].implicit_block_seq = True

        if next.start_mark.line == token.end_mark.line:
            #   - item 1
            #   - item 2
            indent = next.start_mark.column
        elif next.start_mark.column == token.start_mark.column:
            #   -
            #   key: value
            indent = next.start_mark.column
        else:
            #   -
            #     item 1
            #   -
            #     key:
            #       value
            indent = detect_indent(token.start_mark.column, next)

        context['stack'].append(Parent(B_ENT, indent))

    elif isinstance(token, yaml.FlowSequenceStartToken):
        if next.start_mark.line == token.start_mark.line:
            #   - [a, b]
            indent = next.start_mark.column
        else:
            #   - [
            #     a, b
            #   ]
            indent = detect_indent(context['cur_line_indent'], next)

        context['stack'].append(Parent(F_SEQ, indent,
                                       line_indent=context['cur_line_indent']))

    elif isinstance(token, yaml.KeyToken):
        indent = context['stack'][-1].indent

        context['stack'].append(Parent(KEY, indent))

        context['stack'][-1].explicit_key = is_explicit_key(token)

    elif isinstance(token, yaml.ValueToken):
        assert context['stack'][-1].type == KEY

        # Special cases:
        #     key: &anchor
        #       value
        # and:
        #     key: !!tag
        #       value
        if isinstance(next, (yaml.AnchorToken, yaml.TagToken)):
            if (next.start_mark.line == prev.start_mark.line and
                    next.start_mark.line < nextnext.start_mark.line):
                next = nextnext

        # Only if value is not empty
        if not isinstance(next, (yaml.BlockEndToken,
                                 yaml.FlowMappingEndToken,
                                 yaml.FlowSequenceEndToken,
                                 yaml.KeyToken)):
            if context['stack'][-1].explicit_key:
                #   ? k
                #   : value
                # or
                #   ? k
                #   :
                #     value
                indent = detect_indent(context['stack'][-1].indent, next)
            elif next.start_mark.line == prev.start_mark.line:
                #   k: value
                indent = next.start_mark.column
            elif isinstance(next, (yaml.BlockSequenceStartToken,
                                   yaml.BlockEntryToken)):
                # NOTE: We add BlockEntryToken in the test above because
                # sometimes BlockSequenceStartToken are not issued. Try
                # yaml.scan()ning this:
                #     '- lib:\n'
                #     '  - var\n'
                if context['indent-sequences'] is False:
                    indent = context['stack'][-1].indent
                elif context['indent-sequences'] is True:
                    if (context['spaces'] == 'consistent' and
                            next.start_mark.column -
                            context['stack'][-1].indent == 0):
                        # In this case, the block sequence item is not
                        # indented (while it should be), but we don't know
                        # yet the indentation it should have (because
                        # `spaces` is `consistent` and its value has not
                        # been computed yet -- this is probably the
                        # beginning of the document). So we choose an
                        # arbitrary value (2).
                        indent = 2
                    else:
                        indent = detect_indent(context['stack'][-1].indent,
                                               next)
                else:  # 'whatever' or 'consistent'
                    if next.start_mark.column == context['stack'][-1].indent:
                        #   key:
                        #   - e1
                        #   - e2
                        if context['indent-sequences'] == 'consistent':
                            context['indent-sequences'] = False
                        indent = context['stack'][-1].indent
                    else:
                        if context['indent-sequences'] == 'consistent':
                            context['indent-sequences'] = True
                        #   key:
                        #     - e1
                        #     - e2
                        indent = detect_indent(context['stack'][-1].indent,
                                               next)
            else:
                #   k:
                #     value
                indent = detect_indent(context['stack'][-1].indent, next)

            context['stack'].append(Parent(VAL, indent))

    # Pop every parent that the current/next token closes.
    consumed_current_token = False
    while True:
        if (context['stack'][-1].type == F_SEQ and
                isinstance(token, yaml.FlowSequenceEndToken) and
                not consumed_current_token):
            context['stack'].pop()
            consumed_current_token = True

        elif (context['stack'][-1].type == F_MAP and
                isinstance(token, yaml.FlowMappingEndToken) and
                not consumed_current_token):
            context['stack'].pop()
            consumed_current_token = True

        elif (context['stack'][-1].type in (B_MAP, B_SEQ) and
                isinstance(token, yaml.BlockEndToken) and
                not context['stack'][-1].implicit_block_seq and
                not consumed_current_token):
            context['stack'].pop()
            consumed_current_token = True

        elif (context['stack'][-1].type == B_ENT and
                not isinstance(token, yaml.BlockEntryToken) and
                context['stack'][-2].implicit_block_seq and
                not isinstance(token, (yaml.AnchorToken, yaml.TagToken)) and
                not isinstance(next, yaml.BlockEntryToken)):
            # An implicit block sequence never gets a BlockEndToken, so the
            # entry and its implicit sequence are both closed here.
            context['stack'].pop()
            context['stack'].pop()

        elif (context['stack'][-1].type == B_ENT and
                isinstance(next, (yaml.BlockEntryToken,
                                  yaml.BlockEndToken))):
            context['stack'].pop()

        elif (context['stack'][-1].type == VAL and
                not isinstance(token, yaml.ValueToken) and
                not isinstance(token, (yaml.AnchorToken, yaml.TagToken))):
            assert context['stack'][-2].type == KEY
            context['stack'].pop()
            context['stack'].pop()

        elif (context['stack'][-1].type == KEY and
                isinstance(next, (yaml.BlockEndToken,
                                  yaml.FlowMappingEndToken,
                                  yaml.FlowSequenceEndToken,
                                  yaml.KeyToken))):
            # A key without a value: it's part of a set. Let's drop this key
            # and leave room for the next one.
            context['stack'].pop()

        else:
            break