def _analyze_require_blank_line(self, lToi):
    """Flag *begin* keywords that are not preceded by a blank line.

    :param lToi: list of token-of-interest objects, each spanning from the
        token that opens the region up to its *begin* keyword.

    Adds an Insert-action violation for every span whose *begin* keyword
    lacks a blank line above it.
    """
    sSolution = 'Add blank line above *begin* keyword'
    # The action dict is never mutated after creation, so one shared
    # instance can safely back every violation (matches original behavior).
    dAction = {}
    dAction['action'] = 'Insert'
    # A *begin* keyword can be directly preceded (ignoring whitespace) by
    # one of these token types; the original code repeated the same check
    # three times, once per type.
    tPredecessors = (token.process_keyword, token.close_parenthesis, token.is_keyword)
    for oToi in lToi:
        lTokens = oToi.get_tokens()
        iLine = oToi.get_line_number() + utils.count_carriage_returns(lTokens)
        for iToken, oToken in enumerate(lTokens):
            # At most one predecessor/begin pair can match within a span,
            # so once any pattern matches we are done with this oToi.
            if any(utils.are_next_consecutive_token_types_ignoring_whitespace([oPredecessor, token.begin_keyword], iToken, lTokens) for oPredecessor in tPredecessors):
                if not blank_lines_exist(iToken, lTokens):
                    oViolation = violation.New(iLine, oToi, sSolution)
                    oViolation.set_action(dAction)
                    self.add_violation(oViolation)
                break
def _analyze_require_blank_line(self, lToi):
    """Report *begin* keywords missing a blank line above them.

    :param lToi: list of token-of-interest objects ending at a *begin* keyword.

    Creates an Insert-action violation whose index points at the position
    (from the front of the span) where the blank line should be inserted.
    """
    sSolution = 'Insert blank line above *begin* keyword'
    for oToi in lToi:
        lTokens = oToi.get_tokens()
        iLine = oToi.get_line_number() + utils.count_carriage_returns(lTokens)
        # Scan backwards from the *begin* keyword.
        lTokens.reverse()

        # Skip spans where *begin* directly follows (ignoring whitespace)
        # a token that legitimately sits on the line above.
        lSkipPairs = (
            [token.begin_keyword, token.is_keyword],
            [token.begin_keyword, token.close_parenthesis],
            [token.begin_keyword, token.process_keyword],
        )
        if any(utils.are_next_consecutive_token_types_ignoring_whitespace(lPair, 0, lTokens) for lPair in lSkipPairs):
            continue

        # Skip spans that already have a blank line above *begin*
        # (with or without leading whitespace on the *begin* line).
        lBlankPatterns = (
            [token.begin_keyword, parser.whitespace, parser.carriage_return, parser.blank_line],
            [token.begin_keyword, parser.carriage_return, parser.blank_line],
        )
        if any(utils.are_next_consecutive_token_types(lPattern, 0, lTokens) for lPattern in lBlankPatterns):
            continue

        # Insert before any leading whitespace on the *begin* line.
        bLeadingWhitespace = isinstance(lTokens[1], parser.whitespace)
        dAction = {
            'action': 'Insert',
            'index': len(lTokens) - (2 if bLeadingWhitespace else 1),
        }
        lTokens.reverse()
        oViolation = violation.New(iLine, oToi, sSolution)
        oViolation.set_action(dAction)
        self.add_violation(oViolation)
def analyze(self, oFile):
    """Analyze the file for *begin* keywords lacking a blank line above.

    :param oFile: file object providing token extraction by bounding tokens.

    Adds a violation (with an 'insert' index action) for each
    process-to-begin span missing the blank line.
    """
    lToi = oFile.get_tokens_bounded_by(token.process_keyword, token.begin_keyword)
    for oToi in lToi:
        lTokens = oToi.get_tokens()
        iLine = oToi.get_line_number() + utils.count_carriage_returns(lTokens)
        # Walk from the *begin* keyword backwards.
        lTokens.reverse()

        # Nothing to check when *begin* directly follows one of these tokens.
        lSkipPairs = (
            [token.begin_keyword, token.is_keyword],
            [token.begin_keyword, token.close_parenthesis],
            [token.begin_keyword, token.process_keyword],
        )
        if any(utils.are_next_consecutive_token_types_ignoring_whitespace(lPair, 0, lTokens) for lPair in lSkipPairs):
            continue

        # A blank line already exists above *begin* — compliant.
        lBlankPatterns = (
            [token.begin_keyword, parser.whitespace, parser.carriage_return, parser.blank_line],
            [token.begin_keyword, parser.carriage_return, parser.blank_line],
        )
        if any(utils.are_next_consecutive_token_types(lPattern, 0, lTokens) for lPattern in lBlankPatterns):
            continue

        # Insert point: before leading whitespace on the *begin* line if present.
        bLeadingWhitespace = isinstance(lTokens[1], parser.whitespace)
        dAction = {'insert': len(lTokens) - (2 if bLeadingWhitespace else 1)}
        lTokens.reverse()
        oViolation = violation.New(iLine, oToi, self.solution)
        oViolation.set_action(dAction)
        self.add_violation(oViolation)
def analyze(self, oFile):
    """Analyze the file for blank lines appearing above *begin* keywords.

    :param oFile: file object providing token extraction by bounding tokens.

    Adds a violation for each process-to-begin span where blank lines
    exist above the *begin* keyword (this is a no-blank-line rule).
    """
    lToi = oFile.get_tokens_bounded_by(token.process_keyword, token.begin_keyword)
    # The original repeated an identical check for each of these three
    # possible predecessors of the *begin* keyword; deduplicated here.
    tPredecessors = (token.process_keyword, token.close_parenthesis, token.is_keyword)
    for oToi in lToi:
        lTokens = oToi.get_tokens()
        iLine = oToi.get_line_number() + utils.count_carriage_returns(lTokens)
        for iToken, oToken in enumerate(lTokens):
            # At most one predecessor/begin pair can match within a span,
            # so the first match ends the scan for this oToi.
            if any(utils.are_next_consecutive_token_types_ignoring_whitespace([oPredecessor, token.begin_keyword], iToken, lTokens) for oPredecessor in tPredecessors):
                if blank_lines_exist(iToken, lTokens):
                    oViolation = violation.New(iLine, oToi, self.solution)
                    self.add_violation(oViolation)
                break
def _check_new_line_after_assign(self, oToi):
    """Check whether code after an assignment sits on a new line.

    :param oToi: token-of-interest object starting at the assignment.

    Honors the ``new_line_after_assign`` option: 'ignore' disables the
    check, 'yes' requires a return after the assignment, 'no' forbids it.
    """
    sOption = self.new_line_after_assign
    if sOption == 'ignore':
        return
    iLine, lTokens = utils.get_toi_parameters(oToi)
    # Locate the first non-whitespace token after the assignment and count
    # how many line breaks precede it.
    iNextToken = utils.find_next_non_whitespace_token(1, lTokens)
    bSameLine = utils.count_carriage_returns(lTokens[:iNextToken]) == 0
    if bSameLine and sOption == 'yes':
        sSolution = 'Add return after assignment.'
        oViolation = _create_violation(oToi, iLine, 1, 1, 'new_line_after_assign', 'insert', sSolution)
        self.add_violation(oViolation)
    elif not bSameLine and sOption == 'no':
        sSolution = 'Move code after assignment to the same line as assignment.'
        oViolation = _create_violation(oToi, iLine, 0, iNextToken, 'new_line_after_assign', 'remove', sSolution)
        self.add_violation(oViolation)
def _check_last_paren_new_line(self, oToi):
    """Check the placement of the last closing parenthesis in the span.

    :param oToi: token-of-interest object covering the expression.

    Honors ``last_paren_new_line``: 'ignore' disables the check, 'yes'
    requires the closing parenthesis on its own line, 'no' forbids that.
    With ``move_last_comment`` == 'yes', a trailing comment is relocated
    along with the parenthesis.
    """
    if self.last_paren_new_line == 'ignore':
        return
    iLine, lTokens = utils.get_toi_parameters(oToi)
    # Scan from the end of the span backwards to find the last close paren.
    lTokens.reverse()
    iLine = iLine + utils.count_carriage_returns(lTokens)
    bReturnFound = False
    bCommentFound = False
    for iToken, oToken in enumerate(lTokens):
        # Walking a reversed list, so the line number counts down.
        iLine = utils.decrement_line_number(iLine, oToken)
        if isinstance(oToken, parser.comment):
            bCommentFound = True
        if isinstance(oToken, parser.close_parenthesis):
            # Index of the close paren in the original (un-reversed) order.
            iEnd = len(lTokens) - iToken - 1
            # In the reversed list, the tokens *after* the paren are the
            # ones that preceded it in the file: a carriage return there
            # (optionally with whitespace) means the paren starts a line.
            if utils.are_next_consecutive_token_types(
                    [parser.whitespace, parser.carriage_return], iToken + 1, lTokens):
                bReturnFound = True
            elif utils.are_next_consecutive_token_types(
                    [parser.carriage_return], iToken + 1, lTokens):
                bReturnFound = True
            # Restore original order before computing violation indices.
            lTokens.reverse()
            if self.last_paren_new_line == 'yes' and not bReturnFound:
                if self.move_last_comment == 'yes' and bCommentFound:
                    sSolution = 'Move parenthesis after assignment to the next line and trailing comment to previous line.'
                    oViolation = _create_violation(oToi, iLine, iEnd - 1, len(lTokens) - 1,
                                                   'last_paren_new_line', 'insert_and_move_comment', sSolution)
                    self.add_violation(oViolation)
                else:
                    sSolution = 'Move closing parenthesis to the next line.'
                    oViolation = _create_violation(oToi, iLine, iEnd - 1, iEnd,
                                                   'last_paren_new_line', 'insert', sSolution)
                    self.add_violation(oViolation)
            elif self.last_paren_new_line == 'no' and bReturnFound:
                sSolution = 'Move closing parenthesis to previous line.'
                # Remove everything back to the previous non-whitespace token.
                iStart = utils.find_previous_non_whitespace_token(iEnd - 1, lTokens)
                oViolation = _create_violation(oToi, iLine, iStart, iEnd,
                                               'last_paren_new_line', 'remove', sSolution)
                self.add_violation(oViolation)
            # Only the last close paren (first found in reverse) is checked.
            break
def _analyze_no_blank_line(self, lToi):
    """Flag blank line(s) found above a *begin* keyword.

    :param lToi: list of token-of-interest objects ending at a *begin* keyword.

    Adds a Remove-action violation spanning the blank-line tokens above
    *begin* for every non-compliant span.
    """
    sSolution = 'Remove blank line(s) above *begin* keyword'
    for oToi in lToi:
        lTokens = oToi.get_tokens()
        iLine = oToi.get_line_number() + utils.count_carriage_returns(lTokens)
        # Scan backwards from the *begin* keyword.
        lTokens.reverse()
        # Skip spans where *begin* directly follows (ignoring whitespace)
        # a token that legitimately sits on the line above.
        if utils.are_next_consecutive_token_types_ignoring_whitespace(
                [token.begin_keyword, token.is_keyword], 0, lTokens):
            continue
        if utils.are_next_consecutive_token_types_ignoring_whitespace(
                [token.begin_keyword, token.close_parenthesis], 0, lTokens):
            continue
        if utils.are_next_consecutive_token_types_ignoring_whitespace(
                [token.begin_keyword, token.process_keyword], 0, lTokens):
            continue
        # Only spans that actually have a blank line above *begin* (with or
        # without leading whitespace on the *begin* line) are violations.
        if not utils.are_next_consecutive_token_types([token.begin_keyword, parser.whitespace, parser.carriage_return, parser.blank_line], 0, lTokens) and \
           not utils.are_next_consecutive_token_types([token.begin_keyword, parser.carriage_return, parser.blank_line], 0, lTokens):
            continue
        dAction = {}
        dAction['action'] = 'Remove'
        # End of the removal range in original (un-reversed) indices,
        # adjusted for leading whitespace on the *begin* line.
        if isinstance(lTokens[1], parser.whitespace):
            iEnd = len(lTokens) - 2
        else:
            iEnd = len(lTokens) - 3
        # Find the first carriage return (scanning backwards) that is NOT
        # part of the blank-line run: that marks the start of the removal.
        for iToken, oToken in enumerate(lTokens):
            if isinstance(oToken, parser.carriage_return):
                if not isinstance(lTokens[iToken + 1], parser.carriage_return):
                    iStart = len(lTokens) - iToken - 2
                    break
        lTokens.reverse()
        dAction['start'] = iStart
        dAction['end'] = iEnd
        oViolation = violation.New(iLine, oToi, sSolution)
        oViolation.set_action(dAction)
        self.add_violation(oViolation)
def get_line_count(self):
    """Return the number of lines, counted as carriage-return tokens in lAllObjects."""
    return utils.count_carriage_returns(self.lAllObjects)