def _analyze(self, lToi):
    """Check the whitespace on both sides of a token of interest.

    For each token-of-interest, extract the three tokens starting at index 1
    and run the left/right whitespace checks against the configured number of
    spaces (self.iSpaces).  Any action recorded by the checks becomes a
    violation.
    """
    for oToi in lToi:
        iLine, lTokens = utils.get_toi_parameters(oToi)
        fStartLine = False
        # The token starts a line when it directly follows a carriage return
        # plus leading whitespace.
        if isinstance(lTokens[0], parser.carriage_return) and isinstance(
                lTokens[1], parser.whitespace):
            fStartLine = True
        myToi = oToi.extract_tokens(1, 3)
        # Re-read line number and tokens for the narrowed extraction.
        iLine, lTokens = utils.get_toi_parameters(myToi)
        dAction = {}
        # Both helpers mutate dAction in place when spacing is wrong.
        check_spaces_on_left_side(lTokens, fStartLine, self.bNIsMinimum,
                                  dAction, self.iSpaces)
        check_spaces_on_right_side(lTokens, self.bNIsMinimum, dAction,
                                   self.iSpaces)
        # A non-empty action dict means at least one side needs fixing.
        if len(list(dAction.keys())) > 0:
            sSolution = create_solution_text(dAction, self.iSpaces, lTokens)
            oViolation = violation.New(iLine, myToi, sSolution)
            oViolation.set_action(dAction)
            self.add_violation(oViolation)
def _analyze(self, lToi):
    """Flag *after* clauses on signal assignments in the reset portion of a
    clocked process.

    Pass 1 narrows each token-of-interest down to the span between the
    top-level *if* keyword and the clock definition (the reset region).
    Pass 2 scans those spans for signal assignments containing an *after*
    keyword and reports a violation for each.
    """
    lNewToi = []
    for oToi in lToi:
        iLine, lTokens = utils.get_toi_parameters(oToi)
        bInsideAssignment = False
        bResetFound = False
        iStartIndex = None
        for iToken, oToken in enumerate(lTokens):
            iLine = utils.increment_line_number(iLine, oToken)
            if detect_clock_definition(iToken, oToken, lTokens):
                # Only the region *before* the clock definition is the reset
                # portion; stop scanning this process once the clock is seen.
                if bResetFound:
                    lNewToi.append(oToi.extract_tokens(
                        iStartIndex, iToken))
                break
            # Hierarchy 0 means the outermost *if* of the process.
            if isinstance(oToken, token.if_statement.if_keyword
                          ) and oToken.get_hierarchy() == 0:
                iStartIndex = iToken
                bResetFound = True
    # NOTE(review): bInsideAssignment and iStartIndex carry over from the
    # last iteration of the first loop rather than being re-initialized
    # here — confirm this is intentional.
    for oToi in lNewToi:
        iLine, lTokens = utils.get_toi_parameters(oToi)
        bAfterFound = False
        for iToken, oToken in enumerate(lTokens):
            iLine = utils.increment_line_number(iLine, oToken)
            if not bInsideAssignment:
                if detect_signal_assignment(oToken):
                    bInsideAssignment = True
                continue
            if bAfterFound:
                if detect_end_signal_assignment(oToken):
                    oNewToi = oToi.extract_tokens(iStartIndex, iToken)
                    sSolution = 'Remove *after* from signals in reset portion of a clock process'
                    oViolation = violation.New(iLine, oNewToi, sSolution)
                    self.add_violation(oViolation)
                    bInsideAssignment = False
                    bAfterFound = False
            if isinstance(oToken, token.waveform_element.after_keyword):
                # Include the preceding whitespace in the extraction so the
                # fix removes it along with the *after* clause.
                if isinstance(lTokens[iToken - 1], parser.whitespace):
                    iStartIndex = iToken - 1
                else:
                    iStartIndex = iToken
                bAfterFound = True
def _analyze(self, lToi):
    """Validate the whitespace immediately left of the final token.

    A carriage return as the last token is always acceptable.  Existing
    whitespace of the wrong width produces an ``adjust`` action; a missing
    whitespace token produces an ``insert`` action.  When ``bNIsMinimum``
    is set the configured width is treated as a lower bound rather than an
    exact requirement.
    """
    for oToi in lToi:
        iLine, lTokens = utils.get_toi_parameters(oToi)
        dAction = {}
        oRight = lTokens[-1]
        if not isinstance(oRight, parser.carriage_return):
            if isinstance(oRight, parser.whitespace):
                iWidth = len(oRight.get_value())
                if self.bNIsMinimum:
                    bWrongWidth = self.iSpaces > iWidth
                else:
                    bWrongWidth = self.iSpaces != iWidth
                if bWrongWidth:
                    dAction['right'] = {'action': 'adjust'}
            else:
                dAction['right'] = {'action': 'insert'}
        if dAction:
            sSolution = create_solution_text(dAction, self.iSpaces, lTokens)
            oViolation = violation.New(iLine, oToi, sSolution)
            oViolation.set_action(dAction)
            self.add_violation(oViolation)
def _analyze(self, lToi):
    """Handle the *remove* and *insert* cases for an optional token.

    When the configured action is 'remove', every token-of-interest is
    reported directly.  Otherwise a violation is raised for tokens-of-
    interest that do not already contain the token to insert, anchored at
    the last matching anchor token found.
    """
    if self.action == 'remove':
        for oToi in lToi:
            sSolution = self.action.capitalize() + ' ' + self.solution
            self.add_violation(
                violation.New(oToi.get_line_number(), oToi, sSolution))
        return
    for oToi in lToi:
        iLine, lTokens = utils.get_toi_parameters(oToi)
        # Nothing to do if the token is already present.
        if utils.does_token_type_exist_in_list_of_tokens(
                type(self.oInsertToken), lTokens):
            continue
        for iToken, oToken in enumerate(lTokens):
            iLine = utils.increment_line_number(iLine, oToken)
            for oSearch in self.lAnchorTokens:
                if isinstance(oToken, oSearch):
                    # Last match wins; the loop keeps updating these.
                    iIndex = iToken
                    iLineNumber = iLine
        # NOTE(review): iIndex/iLineNumber are unbound if no anchor token is
        # present — presumably the anchor is guaranteed by the caller.
        sSolution = self.action.capitalize() + ' ' + self.solution
        oViolation = violation.New(iLineNumber,
                                   oToi.extract_tokens(iIndex, iIndex),
                                   sSolution)
        self.add_violation(oViolation)
def _check_first_paren_new_line(self, oToi):
    """Enforce placement of the first open parenthesis after an assignment.

    Option 'yes' requires the parenthesis on the line after the assignment
    operator; 'no' requires it on the same line; 'ignore' disables the
    check.  Only the first open parenthesis after the operator is examined.
    """
    if self.first_paren_new_line == 'ignore':
        return
    iLine, lTokens = utils.get_toi_parameters(oToi)
    bSearch = False
    for iToken, oToken in enumerate(lTokens):
        iLine = utils.increment_line_number(iLine, oToken)
        if isinstance(oToken, token.constant_declaration.assignment_operator):
            # Remember where the assignment operator is; only parentheses
            # after it are relevant.
            iStart = iToken
            bSearch = True
        if isinstance(oToken, parser.open_parenthesis) and bSearch:
            # No carriage return between operator and paren means they share
            # a line.  (iStart is set whenever bSearch is True.)
            if utils.find_carriage_return(lTokens[iStart:iToken]) is None:
                if self.first_paren_new_line == 'yes':
                    sSolution = 'Move parenthesis after assignment to the next line.'
                    oViolation = _create_violation(oToi, iLine, iToken - 1,
                                                   iToken,
                                                   'first_paren_new_line',
                                                   'insert', sSolution)
                    self.add_violation(oViolation)
            else:
                if self.first_paren_new_line == 'no':
                    sSolution = 'Move parenthesis to same line as assignment operator.'
                    oViolation = _create_violation(oToi, iLine, iStart,
                                                   iToken,
                                                   'first_paren_new_line',
                                                   'remove', sSolution)
                    self.add_violation(oViolation)
            # Only the first parenthesis is checked.
            break
def analyze(self, oFile):
    """Check that the final token of each bounded region is aligned one
    column to the right of the region's first token.

    Regions without a carriage return, or whose last token does not start
    a line, are skipped.
    """
    lToi = oFile.get_tokens_bounded_by(self.left_token, self.right_token)
    for oToi in lToi:
        iLine, lTokens = utils.get_toi_parameters(oToi)
        # Single-line regions need no alignment check.
        if not utils.does_token_type_exist_in_list_of_tokens(parser.carriage_return, lTokens):
            continue
        # The last token must begin its line for column alignment to apply.
        if not utils.does_token_start_line(len(lTokens) - 1, lTokens):
            continue
        iStartIndex = oToi.get_start_index()
        iEndIndex = oToi.get_end_index()
        # NOTE(review): "Right" holds the start token's column and "Left" the
        # end token's column — confirm the naming is intentional.
        iRightColumn = oFile.get_column_of_token_index(iStartIndex)
        iLeftColumn = oFile.get_column_of_token_index(iEndIndex)
        if iRightColumn + 1 != iLeftColumn:
            # The violation is reported on the last line of the region.
            iLineNumber = iLine + utils.count_token_types_in_list_of_tokens(parser.carriage_return, lTokens)
            sSolution = 'Move ' + lTokens[-1].get_value() + ' to column ' + str(iRightColumn)
            dAction = {}
            # Column 1 means there is no leading whitespace to adjust.
            if iLeftColumn == 1:
                dAction['action'] = 'insert'
            else:
                dAction['action'] = 'adjust'
            dAction['column'] = iRightColumn
            oViolation = violation.New(iLineNumber, oToi, sSolution)
            oViolation.set_action(dAction)
            self.add_violation(oViolation)
def _analyze(self, lToi):
    """Require a token to sit directly to the right of an anchor token.

    Scans each token-of-interest for anchor tokens; if the move token does
    not immediately follow one (optionally separated by whitespace), a
    violation with 'insert'/'move_index' actions is recorded via the
    for/else clause.
    """
    for oToi in lToi:
        iLine, lTokens = utils.get_toi_parameters(oToi)
        if not utils.does_token_type_exist_in_list_of_tokens(self.oMoveToken, lTokens):
            continue
        dAction = {}
        bPassing = False
        for iToken, oToken in enumerate(lTokens):
            iLine = utils.increment_line_number(iLine, oToken)
            for oAnchorToken in self.lAnchorTokens:
                if isinstance(oToken, oAnchorToken):
                    dAction['insert'] = iToken + 1
                    sAnchorToken = oToken.get_value()
                    iAnchorLine = iLine
                    # Already adjacent (with or without one whitespace)?
                    if utils.are_next_consecutive_token_types([parser.whitespace, self.oMoveToken], iToken + 1, lTokens):
                        bPassing = True
                        break
                    elif isinstance(lTokens[iToken + 1], self.oMoveToken):
                        bPassing = True
                        break
            if isinstance(oToken, self.oMoveToken):
                # NOTE(review): this overwrites the anchor's line with the
                # move token's line before building sSolution — confirm.
                iAnchorLine = iLine
                dAction['move_index'] = iToken
                sSolution = 'Move "' + oToken.get_value() + '" on line ' + str(iLine) + ' to the right of "' + sAnchorToken + '" on line ' + str(iAnchorLine)
            if bPassing:
                break
        else:
            # Loop completed without finding the passing arrangement.
            oViolation = violation.New(iAnchorLine, oToi, sSolution)
            oViolation.set_action(dAction)
            oViolation.set_remap()
            self.add_violation(oViolation)
def _check_new_line_after_assign(self, oToi):
    """Enforce the carriage-return policy directly after the assignment.

    Option 'yes' requires a return before the next non-whitespace token;
    'no' forbids one; 'ignore' disables the check.
    """
    if self.new_line_after_assign == 'ignore':
        return
    iLine, lTokens = utils.get_toi_parameters(oToi)
    iNextToken = utils.find_next_non_whitespace_token(1, lTokens)
    # Any carriage return between the assignment and the next real token?
    bReturnPresent = utils.count_carriage_returns(lTokens[:iNextToken]) != 0
    if not bReturnPresent and self.new_line_after_assign == 'yes':
        sSolution = 'Add return after assignment.'
        oViolation = _create_violation(oToi, iLine, 1, 1,
                                       'new_line_after_assign', 'insert',
                                       sSolution)
        self.add_violation(oViolation)
    elif bReturnPresent and self.new_line_after_assign == 'no':
        sSolution = 'Move code after assignment to the same line as assignment.'
        oViolation = _create_violation(oToi, iLine, 0, iNextToken,
                                       'new_line_after_assign', 'remove',
                                       sSolution)
        self.add_violation(oViolation)
def analyze(self, oFile):
    """Enforce consistent letter case of identifiers across the file.

    Builds the list of declared target spellings, then flags every other
    occurrence whose case differs from its declaration (matched case-
    insensitively).
    """
    lTargetTypes = oFile.get_tokens_matching(self.lTokens)
    lTargetValues = []
    lTargetValuesLower = []
    for oTargetType in lTargetTypes:
        oToken = oTargetType.get_tokens()[0]
        lTargetValues.append(oToken.get_value())
        lTargetValuesLower.append(oToken.get_value().lower())
    oToi = oFile.get_all_tokens()
    iLine, lTokens = utils.get_toi_parameters(oToi)
    for iToken, oToken in enumerate(lTokens):
        iLine = utils.increment_line_number(iLine, oToken)
        if is_token_in_ignore_token_list(oToken, self.lIgnoreTokens):
            continue
        sTokenValue = oToken.get_value()
        sTokenValueLower = sTokenValue.lower()
        # Case-insensitive match against each declaration; only an exact
        # case mismatch is a violation.
        for sTargetValue, sTargetValueLower in zip(lTargetValues, lTargetValuesLower):
            if sTokenValueLower == sTargetValueLower:
                if sTokenValue != sTargetValue:
                    sSolution = 'Change "' + sTokenValue + '" to "' + sTargetValue + '"'
                    oNewToi = oToi.extract_tokens(iToken, iToken)
                    oViolation = violation.New(iLine, oNewToi, sSolution)
                    dAction = {}
                    dAction['constant'] = sTargetValue
                    dAction['found'] = sTokenValue
                    oViolation.set_action(dAction)
                    self.add_violation(oViolation)
def _check_open_paren_new_line(self, oToi):
    """Enforce carriage-return policy after open parentheses in an
    aggregate assignment.

    Option 'yes' requires a return after each open parenthesis; 'no'
    forbids one; 'ignore' disables the check.  *others* clauses and nested
    assignments are skipped once the first parenthesis has been seen.
    """
    if self.open_paren_new_line == 'ignore':
        return
    iLine, lTokens = utils.get_toi_parameters(oToi)
    bSearch = False
    bAssignmentFound = False
    bOthersClause = False
    # Start scanning just past the assignment operator.
    iToken = _find_assignment_operator(lTokens) + 1
    iStopIndex = len(lTokens)
    bFirstParenFound = False
    while iToken < iStopIndex:
        if bFirstParenFound:
            # Skip over (others => ...) clauses and element assignments.
            iToken, bOthersClause = _classify_others(iToken, lTokens)
            if bOthersClause:
                iToken += 1
                continue
            iToken, bAssignmentFound = _classify_assignment(iToken, lTokens)
            if bAssignmentFound:
                iToken += 1
                continue
        oToken = lTokens[iToken]
        if isinstance(oToken, parser.open_parenthesis):
            bFirstParenFound = True
            if utils.is_token_at_end_of_line(iToken, lTokens):
                if self.open_paren_new_line == 'no':
                    iEnd = utils.find_next_non_whitespace_token(
                        iToken + 1, lTokens)
                    # Report on the physical line of the parenthesis.
                    iErrorLine = rule_utils.number_of_carriage_returns(
                        lTokens[:iToken]) + iLine
                    sSolution = 'Remove carriage return after open parenthesis.'
                    oViolation = _create_violation(oToi, iErrorLine, iToken,
                                                   iEnd,
                                                   'open_paren_new_line',
                                                   'remove', sSolution)
                    self.add_violation(oViolation)
            else:
                if self.open_paren_new_line == 'yes':
                    sSolution = 'Add carriage return after open parenthesis.'
                    iErrorLine = rule_utils.number_of_carriage_returns(
                        lTokens[:iToken]) + iLine
                    oViolation = _create_violation(oToi, iErrorLine, iToken,
                                                   iToken,
                                                   'open_paren_new_line',
                                                   'insert', sSolution)
                    self.add_violation(oViolation)
        bOthersClause = False
        iToken += 1
    return None
def _analyze(self, lToi):
    """Report a violation for every blank line found in the tokens of
    interest."""
    for oToi in lToi:
        iLine, lTokens = utils.get_toi_parameters(oToi)
        for iIndex, oToken in enumerate(lTokens):
            iLine = utils.increment_line_number(iLine, oToken)
            if not isinstance(oToken, parser.blank_line):
                continue
            oBlankToi = oToi.extract_tokens(iIndex, iIndex + 1)
            self.add_violation(violation.New(iLine, oBlankToi, self.solution))
def _analyze(self, lToi):
    """Flag tokens-of-interest whose second-to-last token is whitespace and
    whose final token is neither whitespace, a comment, nor a numeric
    literal."""
    for oToi in lToi:
        iLine, lTokens = utils.get_toi_parameters(oToi)
        oLast = lTokens[-1]
        bPadded = isinstance(lTokens[-2], parser.whitespace)
        if (bPadded
                and not utils.token_is_whitespace_or_comment(oLast)
                and not oLast.get_value().isnumeric()):
            self.add_violation(violation.New(iLine, oToi, self.solution))
def _is_open_paren_after_assignment(oToi):
    """Return True when an assignment operator is followed, ignoring
    whitespace, by an open parenthesis anywhere in the token list."""
    _, lTokens = utils.get_toi_parameters(oToi)
    lPattern = [
        token.constant_declaration.assignment_operator,
        parser.open_parenthesis,
    ]
    return any(
        isinstance(oToken, token.constant_declaration.assignment_operator)
        and utils.are_next_consecutive_token_types_ignoring_whitespace(
            lPattern, iIndex, lTokens)
        for iIndex, oToken in enumerate(lTokens)
    )
def _check_assign_on_single_line(self, oToi):
    """Require each element assignment inside an aggregate to fit on a
    single line.

    Walks the tokens after the assignment operator; for every ``=>``
    element assignment found (after the first parenthesis), any carriage
    return between the element name and the end of its assignment is
    flagged for removal.
    """
    if self.assign_on_single_line == 'ignore':
        return
    iLine, lTokens = utils.get_toi_parameters(oToi)
    bSearch = False
    iOpenParen = 0
    iCloseParen = 0
    bAssignmentFound = False
    bOthersClause = False
    # Start scanning just past the assignment operator.
    iToken = _find_assignment_operator(lTokens) + 1
    iStopIndex = len(lTokens)
    bFirstParenFound = False
    while iToken < iStopIndex:
        if bFirstParenFound:
            # Skip (others => ...) clauses entirely.
            iToken, bOthersClause = _classify_others(iToken, lTokens)
            if bOthersClause:
                iToken += 1
                continue
            if lTokens[iToken].get_value() == '=>':
                # Anchor at the element name left of the arrow.
                iPreviousToken = utils.find_previous_non_whitespace_token(
                    iToken - 1, lTokens)
            iToken, bAssignmentFound = _classify_assignment(iToken, lTokens)
            if bAssignmentFound:
                # rule_utils.print_debug(lTokens[iPreviousToken:iToken + 1])
                # Any carriage return within the assignment is a violation.
                if rule_utils.number_of_carriage_returns(
                        lTokens[iPreviousToken:iToken]) > 0:
                    iErrorLine = rule_utils.number_of_carriage_returns(
                        lTokens[:iPreviousToken]) + iLine
                    sSolution = 'Remove carriage returns for assignments.'
                    oViolation = _create_violation(oToi, iErrorLine,
                                                   iPreviousToken, iToken,
                                                   'assign_on_single_line',
                                                   'remove', sSolution)
                    self.add_violation(oViolation)
        oToken = lTokens[iToken]
        if isinstance(oToken, parser.open_parenthesis):
            bFirstParenFound = True
        iToken += 1
    return None
def _check_last_paren_new_line(self, oToi):
    """Enforce placement of the final closing parenthesis.

    The token list is scanned in reverse to locate the last closing
    parenthesis; 'yes' requires it to start its own line, 'no' requires it
    on the previous line.  A trailing comment can optionally be relocated
    when inserting the return.
    """
    if self.last_paren_new_line == 'ignore':
        return
    iLine, lTokens = utils.get_toi_parameters(oToi)
    # Scan backwards from the end of the statement.
    lTokens.reverse()
    iLine = iLine + utils.count_carriage_returns(lTokens)
    bReturnFound = False
    bCommentFound = False
    for iToken, oToken in enumerate(lTokens):
        iLine = utils.decrement_line_number(iLine, oToken)
        if isinstance(oToken, parser.comment):
            bCommentFound = True
        if isinstance(oToken, parser.close_parenthesis):
            # Index of this parenthesis in the un-reversed list.
            iEnd = len(lTokens) - iToken - 1
            # In reversed order, "next" tokens are those preceding the
            # parenthesis in the file; a carriage return there means the
            # parenthesis starts its line.
            if utils.are_next_consecutive_token_types(
                    [parser.whitespace, parser.carriage_return], iToken + 1,
                    lTokens):
                bReturnFound = True
            elif utils.are_next_consecutive_token_types(
                    [parser.carriage_return], iToken + 1, lTokens):
                bReturnFound = True
            # Restore original order before building violations.
            lTokens.reverse()
            if self.last_paren_new_line == 'yes' and not bReturnFound:
                if self.move_last_comment == 'yes' and bCommentFound:
                    sSolution = 'Move parenthesis after assignment to the next line and trailing comment to previous line.'
                    oViolation = _create_violation(
                        oToi, iLine, iEnd - 1, len(lTokens) - 1,
                        'last_paren_new_line', 'insert_and_move_comment',
                        sSolution)
                    self.add_violation(oViolation)
                else:
                    sSolution = 'Move closing parenthesis to the next line.'
                    oViolation = _create_violation(oToi, iLine, iEnd - 1,
                                                   iEnd,
                                                   'last_paren_new_line',
                                                   'insert', sSolution)
                    self.add_violation(oViolation)
            elif self.last_paren_new_line == 'no' and bReturnFound:
                sSolution = 'Move closing parenthesis to previous line.'
                iStart = utils.find_previous_non_whitespace_token(
                    iEnd - 1, lTokens)
                oViolation = _create_violation(oToi, iLine, iStart, iEnd,
                                               'last_paren_new_line',
                                               'remove', sSolution)
                self.add_violation(oViolation)
            # Only the last closing parenthesis is checked.
            break
def _analyze(self, lToi):
    """Flag declarations whose assignment operator is not on the same line
    as the *constant* keyword."""
    for oToi in lToi:
        iLine, lTokens = utils.get_toi_parameters(oToi)
        for oToken in lTokens:
            iLine = utils.increment_line_number(iLine, oToken)
            if isinstance(oToken, token.constant_keyword):
                iKeywordLine = iLine
            if isinstance(oToken, token.assignment_operator):
                # NOTE(review): iKeywordLine is unbound if an assignment
                # operator appears before any constant keyword — presumably
                # the token extraction guarantees the ordering.
                if iKeywordLine != iLine:
                    oViolation = violation.New(oToi.get_line_number(), oToi,
                                               self.solution)
                    self.add_violation(oViolation)
def _analyze(self, lToi):
    """Flag *std_logic_vector* tokens that are separated from their open
    parenthesis by whitespace."""
    oToi = lToi[0]
    iLine, lTokens = utils.get_toi_parameters(oToi)
    # The final two tokens can never be followed by the two-token pattern.
    for iIndex, oToken in enumerate(lTokens[:-2]):
        iLine = utils.increment_line_number(iLine, oToken)
        if oToken.get_value().lower() != 'std_logic_vector':
            continue
        bSpaceBeforeParen = utils.are_next_consecutive_token_types(
            [parser.whitespace, parser.open_parenthesis], iIndex + 1,
            lTokens)
        if bSpaceBeforeParen:
            oExtracted = oToi.extract_tokens(iIndex, iIndex + 1)
            self.add_violation(
                violation.New(iLine, oExtracted, self.solution))
def analyze(self, oFile):
    """Validate the interior lines of a block comment header.

    The first comment must satisfy is_header() for the block to be checked
    at all.  Interior comments (neither first nor last) must start with
    '--' + self.comment_left; when allow_indenting is False their indent is
    forced to zero and the file is updated with the re-indented tokens.
    """
    self._print_debug_message('Analyzing rule: ' + self.unique_id)
    lToi = self._get_tokens_of_interest(oFile)
    lUpdate = []
    for oToi in lToi:
        iLine, lTokens = utils.get_toi_parameters(oToi)
        iComments = utils.count_token_types_in_list_of_tokens(parser.comment, lTokens)
        iComment = 0
        for iToken, oToken in enumerate(lTokens):
            iLine = utils.increment_line_number(iLine, oToken)
            if isinstance(oToken, parser.comment):
                iComment += 1
                if iComment == 1:
                    # Not a header block at all; skip it.
                    if not is_header(oToken.get_value()):
                        break
                elif iComment > 1 and iComment < iComments:
                    # Interior comment line of the header block.
                    if not self.allow_indenting:
                        oToken.set_indent(0)
                    if self.comment_left is None:
                        continue
                    if isinstance(lTokens[iToken - 1], parser.whitespace):
                        # Indented comment; nothing more to check here when
                        # indenting is disallowed.
                        if not self.allow_indenting:
                            break
                    sHeader = '--'
                    sHeader += self.comment_left
                    sComment = oToken.get_value()
                    if not sComment.startswith(sHeader):
                        sSolution = 'Comment must start with ' + sHeader
                        oViolation = violation.New(iLine, oToi, sSolution)
                        self.add_violation(oViolation)
        if not self.allow_indenting:
            # Queue a token update so the zeroed indents are written back.
            lUpdate.append(violation.New(0, oToi, ''))
    if not self.allow_indenting:
        oFile.update(lUpdate)
def _analyze(self, lToi):
    """Report tokens-of-interest according to the configured action.

    Action 'remove' flags every token-of-interest.  Any other action flags
    only those that do not already contain self.insert_token.
    """
    sSolution = self.action.capitalize() + ' ' + self.solution
    if self.action == 'remove':
        for oToi in lToi:
            self.add_violation(
                violation.New(oToi.get_line_number(), oToi, sSolution))
        return
    for oToi in lToi:
        iLine, lTokens = utils.get_toi_parameters(oToi)
        for oToken in lTokens:
            iLine = utils.increment_line_number(iLine, oToken)
            if isinstance(oToken, self.insert_token):
                break
        else:
            # Loop finished without finding the token: it is missing.
            self.add_violation(violation.New(iLine, oToi, sSolution))
def _analyze(self, lToi):
    """Require a label after *end generate* keywords.

    Handles for-, if-, and case-generate statements.  The three cases were
    previously three duplicated branches; they are table-driven here, which
    preserves the original match order and behavior.  The expected label
    value is the most recently collected label (lLabels[-1]).
    """
    # (end-generate keyword class, matching end-generate label class)
    lGenerateTypes = [
        (token.for_generate_statement.end_generate_keyword,
         token.for_generate_statement.end_generate_label),
        (token.if_generate_statement.end_generate_keyword,
         token.if_generate_statement.end_generate_label),
        (token.case_generate_statement.end_generate_keyword,
         token.case_generate_statement.end_generate_label),
    ]
    for oToi in lToi:
        iLine, lTokens = utils.get_toi_parameters(oToi)
        lLabels = []
        for iToken, oToken in enumerate(lTokens):
            iLine = utils.increment_line_number(iLine, oToken)
            # manage_labels pushes/pops open generate labels as they appear.
            if manage_labels(oToken, lLabels):
                continue
            for oEndKeyword, oEndLabel in lGenerateTypes:
                if isinstance(oToken, oEndKeyword):
                    if not utils.are_next_consecutive_token_types_ignoring_whitespace(
                            [oEndLabel], iToken + 1, lTokens):
                        oNewToi = oToi.extract_tokens(iToken, iToken)
                        dAction = {
                            'label': oEndLabel(lLabels[-1].get_value()),
                        }
                        sSolution = 'Add label ' + lLabels[-1].get_value()
                        oViolation = violation.New(oNewToi.get_line_number(),
                                                   oNewToi, sSolution)
                        oViolation.set_action(dAction)
                        self.add_violation(oViolation)
                    # A token matches at most one generate flavor; stop as
                    # the original first-match-wins chain did.
                    break
def analyze(self, oFile):
    """Require an *is* keyword to the right of the last anchor token in
    each bounded region that does not already contain one."""
    lToi = oFile.get_tokens_bounded_by(self.lAnchorTokens[0], self.oEndToken)
    for oToi in lToi:
        iLine, lTokens = utils.get_toi_parameters(oToi)
        # Already present; nothing to do.
        if utils.does_token_type_exist_in_list_of_tokens(type(self.oInsertToken), lTokens):
            continue
        dAction = {}
        for iToken, oToken in enumerate(lTokens):
            iLine = utils.increment_line_number(iLine, oToken)
            for oSearch in self.lAnchorTokens:
                if isinstance(oToken, oSearch):
                    # Last match wins; the loop keeps updating these.
                    iIndex = iToken
                    iLineNumber = iLine
                    sToken = oToken.get_value()
        # NOTE(review): iIndex/iLineNumber/sToken are unbound if no anchor
        # token exists — presumably guaranteed by the bounded extraction.
        sSolution = 'Add *is* keyword to the right of ' + sToken
        oViolation = violation.New(iLineNumber,
                                   oToi.extract_tokens(iIndex, iIndex),
                                   sSolution)
        self.add_violation(oViolation)
def analyze(self, oFile):
    """Enforce consistent letter case within a bounded region.

    Target spellings are collected from declarations found between
    self.oStart/self.oEnd; occurrences inside self.oRegionStart/
    self.oRegionEnd whose case differs (matched case-insensitively) are
    flagged with a rename action.
    """
    self._print_debug_message('Analyzing rule: ' + self.unique_id)
    lTargetTypes = oFile.get_tokens_matching_in_range_bounded_by_tokens(
        self.lTokens, self.oStart, self.oEnd)
    lTargetValues = []
    lTargetValuesLower = []
    for oTargetType in lTargetTypes:
        oToken = oTargetType.get_tokens()[0]
        lTargetValues.append(oToken.get_value())
        lTargetValuesLower.append(oToken.get_value().lower())
    lToi = oFile.get_tokens_bounded_by(self.oRegionStart, self.oRegionEnd)
    for oToi in lToi:
        iLine, lTokens = utils.get_toi_parameters(oToi)
        for iToken, oToken in enumerate(lTokens):
            iLine = utils.increment_line_number(iLine, oToken)
            if is_token_in_ignore_token_list(oToken, self.lIgnoreTokens):
                continue
            sTokenValue = oToken.get_value()
            sTokenValueLower = sTokenValue.lower()
            # Case-insensitive match; only an exact case difference is a
            # violation.
            for sTargetValue, sTargetValueLower in zip(
                    lTargetValues, lTargetValuesLower):
                if sTokenValueLower == sTargetValueLower:
                    if sTokenValue != sTargetValue:
                        sSolution = 'Change "' + sTokenValue + '" to "' + sTargetValue + '"'
                        oNewToi = oToi.extract_tokens(iToken, iToken)
                        oViolation = violation.New(iLine, oNewToi, sSolution)
                        dAction = {}
                        dAction['constant'] = sTargetValue
                        dAction['found'] = sTokenValue
                        oViolation.set_action(dAction)
                        self.add_violation(oViolation)
def analyze(self, oFile):
    """Check the indentation of every line within each token-of-interest
    against the column computed by self._calculate_column.

    Lines beginning with whitespace of the wrong width get an 'adjust'
    violation; lines with no leading whitespace get an 'insert'; blank
    lines are ignored.
    """
    self._print_debug_message('Analyzing rule: ' + self.unique_id)
    lToi = self._get_tokens_of_interest(oFile)
    for oToi in lToi:
        iLine, lTokens = utils.get_toi_parameters(oToi)
        iSpaces = self._calculate_column(oFile, oToi, lTokens)
        for iToken, oToken in enumerate(lTokens):
            if isinstance(oToken, parser.carriage_return):
                iLine += 1
                # NOTE(review): iToken + 1 raises IndexError if the final
                # token is a carriage return — presumably the extraction
                # never ends on one.
                if isinstance(lTokens[iToken + 1], parser.whitespace):
                    if len(lTokens[iToken + 1].get_value()) != iSpaces:
                        self._create_violation(
                            iLine, iSpaces, 'adjust',
                            oToi.extract_tokens(iToken + 1, iToken + 1))
                elif isinstance(lTokens[iToken + 1], parser.blank_line):
                    continue
                else:
                    self._create_violation(
                        iLine, iSpaces, 'insert',
                        oToi.extract_tokens(iToken + 1, iToken + 1))
def analyze(self, oFile):
    """Check line alignment inside conditional waveform assignments.

    Tracks whether the scan is in the waveform part or the condition part
    (toggled by *when*/*else* keywords).  Waveform continuation lines must
    align to the computed start column; condition continuation lines must
    align to the column stack maintained for nested parentheses.
    """
    lToi = oFile.get_tokens_bounded_by(self.lTokenPairs[0][0],
                                       self.lTokenPairs[0][1])
    for oToi in lToi:
        iLine, lTokens = utils.get_toi_parameters(oToi)
        iStartColumn = calculate_start_column(oFile, oToi)
        # Stack of alignment columns; one entry per open parenthesis plus
        # the base column.
        lColumn = []
        lColumn.append(iStartColumn)
        iColumn = oFile.get_column_of_token_index(oToi.get_start_index())
        iIndent = 0
        bWaveform = True
        bCondition = False
        iPreviousColumn = 0
        for iToken, oToken in enumerate(lTokens):
            iLine = utils.increment_line_number(iLine, oToken)
            if isinstance(oToken, parser.carriage_return):
                iPreviousColumn = lColumn[-1]
                iColumn = 0
                iIndent = calculate_indent(iToken, lTokens)
                oNextToken = lTokens[iToken + 1]
                if bWaveform:
                    iAdjustIndex = -1
                    if isinstance(oNextToken, parser.whitespace):
                        if len(oNextToken.get_value()) != iStartColumn:
                            oViolation = build_violation(
                                iLine, iStartColumn, 'adjust', oToi, iToken)
                            self.add_violation(oViolation)
                    else:
                        oViolation = build_violation(
                            iLine, iStartColumn, 'insert', oToi, iToken)
                        self.add_violation(oViolation)
                if bCondition:
                    if isinstance(oNextToken, parser.whitespace):
                        # A close paren starting the line aligns to the
                        # enclosing (one level up) column.
                        oSecondToken = lTokens[iToken + 2]
                        if isinstance(oSecondToken,
                                      parser.close_parenthesis):
                            iAdjustIndex = -2
                        else:
                            iAdjustIndex = -1
                        if len(oNextToken.get_value()
                               ) != lColumn[iAdjustIndex]:
                            oViolation = build_violation(
                                iLine, lColumn[iAdjustIndex], 'adjust',
                                oToi, iToken)
                            self.add_violation(oViolation)
                    else:
                        if isinstance(oNextToken,
                                      parser.close_parenthesis):
                            iAdjustIndex = -2
                        else:
                            iAdjustIndex = -1
                        oViolation = build_violation(
                            iLine, lColumn[iAdjustIndex], 'insert', oToi,
                            iToken)
                        self.add_violation(oViolation)
                continue
            iColumn += len(oToken.get_value())
            if isinstance(oToken, token.conditional_waveforms.when_keyword):
                # Enter the condition part; conditions align after *when*.
                bWaveform = False
                bCondition = True
                lColumn.append(iColumn + 1)
            if isinstance(oToken, token.conditional_waveforms.else_keyword):
                # Back to the waveform part.
                bWaveform = True
                bCondition = False
            if isinstance(oToken, parser.open_parenthesis):
                lColumn.append(iColumn + iPreviousColumn - iIndent)
            if isinstance(oToken, parser.close_parenthesis):
                lColumn.pop()
def analyze(self, oFile):
    """Check multi-line indentation of an assignment with respect to its
    parentheses, skipping comment-only lines.

    Records the actual indent of each line and the open/close parenthesis
    positions, then compares against the expected indents computed by the
    align_paren/align_left variant helpers.
    """
    lToi = self._get_tokens_of_interest(oFile)
    for oToi in lToi:
        iLine, lTokens = utils.get_toi_parameters(oToi)
        iFirstLine, iFirstLineIndent = _get_first_line_info(iLine, oFile)
        iAssignColumn = oFile.get_column_of_token_index(
            oToi.get_start_index())
        iColumn = iAssignColumn
        dActualIndent = {}
        dActualIndent[iLine] = iFirstLineIndent
        lParens = []
        # Maps line number -> token index of that line's first token.
        dIndex = {}
        bStartsWithParen = _starts_with_paren(lTokens)
        bSkipCommentLine = False
        for iToken, oToken in enumerate(lTokens):
            iLine = utils.increment_line_number(iLine, oToken)
            if isinstance(oToken, parser.blank_line):
                continue
            # While skipping a comment-only line, ignore everything up to
            # the next carriage return.
            if bSkipCommentLine:
                if not isinstance(oToken, parser.carriage_return):
                    continue
            if isinstance(oToken, parser.carriage_return):
                iColumn = 0
                bSkipCommentLine = rules_utils.does_line_start_with_comment(
                    lTokens[iToken + 1:iToken + 3])
                if bSkipCommentLine:
                    # None marks the line as exempt from the indent check.
                    dActualIndent[iLine] = None
                else:
                    dActualIndent[iLine] = _set_indent(iToken, lTokens)
                dIndex[iLine] = iToken + 1
                continue
            iColumn += len(oToken.get_value())
            if isinstance(oToken, parser.close_parenthesis):
                dParen = {}
                dParen['type'] = 'close'
                dParen['line'] = iLine
                dParen['column'] = iColumn
                dParen['begin_line'] = utils.does_token_start_line(
                    iToken, lTokens)
                lParens.append(dParen)
            if isinstance(oToken, parser.open_parenthesis):
                dParen = {}
                dParen['type'] = 'open'
                dParen['line'] = iLine
                dParen['column'] = iColumn
                lParens.append(dParen)
        iLastLine = iLine
        # Single-line statements need no indent analysis.
        if iFirstLine == iLastLine:
            continue
        iFirstTokenLength = len(lTokens[0].get_value())
        # Exactly one of the four option combinations computes the
        # expected indents.
        if self.align_paren == 'no' and self.align_left == 'yes':
            dExpectedIndent = _analyze_align_paren_no(
                iFirstLine, iLastLine, lParens, self.indentSize,
                dActualIndent, bStartsWithParen)
        if self.align_paren == 'yes' and self.align_left == 'no':
            dExpectedIndent = _analyze_align_paren_yes_align_left_no(
                iFirstLine, iLastLine, lParens, dActualIndent,
                self.indentSize, bStartsWithParen, iAssignColumn,
                iFirstTokenLength)
        if self.align_paren == 'yes' and self.align_left == 'yes':
            dExpectedIndent = _analyze_align_paren_yes_align_left_yes(
                iFirstLine, iLastLine, lParens, dActualIndent,
                self.indentSize, bStartsWithParen, iAssignColumn)
        if self.align_paren == 'no' and self.align_left == 'no':
            dExpectedIndent = _analyze_align_paren_no_align_left_no(
                iFirstLine, iLastLine, lParens, dActualIndent,
                self.indentSize, bStartsWithParen, iAssignColumn)
        for iLine in range(iFirstLine, iLastLine + 1):
            # Comment-only lines were marked None above.
            if dActualIndent[iLine] is None:
                continue
            if indents_match(dActualIndent[iLine], dExpectedIndent[iLine]):
                continue
            oViolation = build_violation(iLine, oToi, iToken,
                                         dExpectedIndent, dIndex,
                                         dActualIndent)
            self.add_violation(oViolation)
def analyze(self, oFile):
    """Check multi-line indentation of an assignment with respect to its
    parentheses.

    Records the actual indent of each continuation line and the positions
    of open/close parentheses, computes the expected indent per line for
    the configured align_paren/align_left combination, and emits an
    adjust/insert violation for every mismatching line.
    """
    lToi = self._get_tokens_of_interest(oFile)
    for oToi in lToi:
        iLine, lTokens = utils.get_toi_parameters(oToi)
        iFirstLine, iFirstLineIndent = _get_first_line_info(iLine, oFile)
        iFirstColumn, iNextColumn, iLastColumn = _find_first_column(
            oFile, oToi, self.align_left, iFirstLineIndent, self.indentSize)
        iAssignColumn = oFile.get_column_of_token_index(
            oToi.get_start_index())
        iColumn = iAssignColumn
        dActualIndent = {}
        dActualIndent[iLine] = iFirstLineIndent
        lParens = []
        # Maps line number -> token index of that line's first token.
        dIndex = {}
        bStartsWithParen = _starts_with_paren(lTokens)
        for iToken, oToken in enumerate(lTokens):
            iLine = utils.increment_line_number(iLine, oToken)
            if isinstance(oToken, parser.blank_line):
                continue
            if isinstance(oToken, parser.carriage_return):
                iColumn = 0
                dActualIndent[iLine] = _set_indent(iToken, lTokens)
                dIndex[iLine] = iToken + 1
                continue
            iColumn += len(oToken.get_value())
            if isinstance(oToken, parser.close_parenthesis):
                dParen = {}
                dParen['type'] = 'close'
                dParen['line'] = iLine
                dParen['column'] = iColumn
                dParen['begin_line'] = utils.does_token_start_line(
                    iToken, lTokens)
                lParens.append(dParen)
            if isinstance(oToken, parser.open_parenthesis):
                dParen = {}
                dParen['type'] = 'open'
                dParen['line'] = iLine
                dParen['column'] = iColumn
                lParens.append(dParen)
        iLastLine = iLine
        # Single-line statements need no indent analysis.
        if iFirstLine == iLastLine:
            continue
        # Exactly one of the option combinations computes the expected
        # indents.  (align_paren/align_left are booleans here.)
        if not self.align_paren and self.align_left:
            dExpectedIndent = _analyze_align_paren_false(
                iFirstLine, iLastLine, lParens, self.indentSize,
                dActualIndent, bStartsWithParen)
        if self.align_paren and not self.align_left:
            dExpectedIndent = _analyze_align_paren_true(
                iFirstLine, iLastLine, lParens, dActualIndent,
                self.indentSize, bStartsWithParen, iAssignColumn)
        if self.align_paren and self.align_left:
            dExpectedIndent = _analyze_align_paren_true_align_left_true(
                iFirstLine, iLastLine, lParens, dActualIndent,
                self.indentSize, bStartsWithParen, iAssignColumn)
        for iLine in range(iFirstLine, iLastLine + 1):
            if dActualIndent[iLine] == dExpectedIndent[iLine]:
                continue
            dAction = {}
            dAction['line'] = iLine
            dAction['column'] = dExpectedIndent[iLine]
            # Zero actual indent means there is no whitespace token to
            # resize; one must be inserted instead.
            if dActualIndent[iLine] > 0:
                dAction['action'] = 'adjust'
            else:
                dAction['action'] = 'insert'
            sSolution = 'Adjust indent to column ' + str(
                dExpectedIndent[iLine])
            iToken = dIndex[iLine]
            oViolation = violation.New(iLine,
                                       oToi.extract_tokens(iToken, iToken),
                                       sSolution)
            oViolation.set_action(dAction)
            self.add_violation(oViolation)
def analyze(self, oFile):
    """Validate the final line (footer) of a block comment.

    Builds the expected footer string from the configured repeat
    characters, optional footer string, and alignment ('center', 'left',
    'right'), then compares it to the last comment of the block.  When
    allow_indenting is False, comment indents are zeroed and the file is
    updated accordingly.
    """
    self._print_debug_message('Analyzing rule: ' + self.unique_id)
    lToi = self._get_tokens_of_interest(oFile)
    lUpdate = []
    for oToi in lToi:
        iLine, lTokens = utils.get_toi_parameters(oToi)
        iComments = utils.count_token_types_in_list_of_tokens(
            parser.comment, lTokens)
        iComment = 0
        for iToken, oToken in enumerate(lTokens):
            iLine = utils.increment_line_number(iLine, oToken)
            if isinstance(oToken, parser.comment):
                iComment += 1
                # Only the last comment of the block is the footer.
                if iComment == iComments:
                    if not self.allow_indenting:
                        if isinstance(lTokens[iToken - 1],
                                      parser.whitespace):
                            break
                        else:
                            oToken.set_indent(0)
                    iWhitespace = self.indentSize * oToken.get_indent()
                    sFooter = '--'
                    if self.footer_left is not None:
                        sFooter += self.footer_left
                        iFooter_left = len(self.footer_left)
                    else:
                        iFooter_left = 0
                    # Build the expected footer text.
                    if self.footer_string is None:
                        sFooter += self.footer_left_repeat * (
                            self.max_footer_column - iWhitespace -
                            len(sFooter))
                    elif self.footer_alignment == 'center':
                        iLength = int(
                            (self.max_footer_column - iWhitespace -
                             len(self.footer_string)) / 2) - iFooter_left - 2
                        sFooter += self.footer_left_repeat * (iLength)
                        sFooter += self.footer_string
                        sFooter += self.footer_right_repeat * (
                            self.max_footer_column - len(sFooter))
                    elif self.footer_alignment == 'left':
                        sFooter += self.footer_left_repeat
                        sFooter += self.footer_string
                        iLength = self.max_footer_column - iWhitespace - len(
                            sFooter)
                        sFooter += self.footer_right_repeat * (
                            self.max_footer_column - len(sFooter))
                    elif self.footer_alignment == 'right':
                        iLength = self.max_footer_column - iWhitespace - len(
                            sFooter) - len(self.footer_string) - 1
                        sFooter += self.footer_left_repeat * (iLength)
                        sFooter += self.footer_string
                        sFooter += self.footer_right_repeat
                    sComment = oToken.get_value()
                    try:
                        if is_footer(sComment):
                            if not self.allow_indenting:
                                oToken.set_indent(0)
                            if sComment != sFooter:
                                sSolution = 'Change block comment footer to : ' + sFooter
                                oViolation = violation.New(
                                    iLine, oToi, sSolution)
                                self.add_violation(oViolation)
                            break
                    except IndexError:
                        # is_footer indexing failed on a short comment;
                        # treat the block as having no footer.
                        break
        if not self.allow_indenting:
            # Queue a token update so zeroed indents are written back.
            lUpdate.append(violation.New(0, oToi, ''))
    if not self.allow_indenting:
        oFile.update(lUpdate)
def analyze(self, oFile):
    """Check alignment of continuation lines against the paren-aware
    column stack.

    Tokens-of-interest whose second token (ignoring whitespace) is an open
    parenthesis are skipped.  After each carriage return, the first token
    of the next line is compared against the column at the top of the
    stack and an adjust/insert violation is emitted on mismatch.
    """
    lToi = []
    for lTokenPair in self.lTokenPairs:
        aToi = oFile.get_tokens_bounded_by(
            lTokenPair[0], lTokenPair[1],
            bExcludeLastToken=self.bExcludeLastToken)
        lToi = utils.combine_two_token_class_lists(lToi, aToi)
    for oToi in lToi:
        iLine, lTokens = utils.get_toi_parameters(oToi)
        if utils.are_next_consecutive_token_types_ignoring_whitespace(
                [parser.open_parenthesis], 1, lTokens):
            continue
        iStartColumn = calculate_start_column(oFile, oToi)
        # Stack of alignment columns, one entry per open parenthesis plus
        # the base column.
        lColumn = []
        lColumn.append(iStartColumn)
        bCheckAlignment = False
        iFirstColumn = oFile.get_column_of_token_index(
            oToi.get_start_index())
        iColumn = iFirstColumn
        iPreviousColumn = 0
        iIndent = 0
        for iToken, oToken in enumerate(lTokens):
            iLine = utils.increment_line_number(iLine, oToken)
            if isinstance(oToken, parser.carriage_return):
                # The next token begins a new line; verify its alignment.
                bCheckAlignment = True
                iPreviousColumn = lColumn[-1]
                iColumn = 0
                if isinstance(lTokens[iToken + 1], parser.whitespace):
                    iIndent = len(lTokens[iToken + 1].get_value())
                else:
                    iIndent = 0
                continue
            if isinstance(oToken, parser.blank_line):
                bCheckAlignment = False
                continue
            iColumn += len(oToken.get_value())
            if isinstance(oToken, parser.open_parenthesis):
                lColumn.append(iColumn + iPreviousColumn - iIndent)
            if isinstance(oToken, parser.close_parenthesis):
                lColumn.pop()
            if bCheckAlignment:
                if isinstance(oToken, parser.whitespace):
                    if len(oToken.get_value()) != lColumn[-1]:
                        dAction = {}
                        dAction['line'] = iLine
                        dAction['column'] = lColumn[-1]
                        dAction['action'] = 'adjust'
                        dAction['indent'] = iIndent
                        dAction['previous'] = iPreviousColumn
                        oViolation = violation.New(
                            iLine, oToi.extract_tokens(iToken, iToken),
                            self.solution)
                        oViolation.set_action(dAction)
                        self.add_violation(oViolation)
                else:
                    # NOTE(review): lColumn is a list, so `lColumn != 0` is
                    # always True — possibly meant lColumn[-1] != 0; confirm.
                    if lColumn != 0:
                        dAction = {}
                        if isinstance(oToken, parser.open_parenthesis):
                            # The paren was already pushed; align to the
                            # enclosing column.
                            dAction['column'] = lColumn[-2]
                        else:
                            dAction['column'] = lColumn[-1]
                        dAction['action'] = 'insert'
                        oViolation = violation.New(
                            iLine, oToi.extract_tokens(iToken, iToken),
                            self.solution)
                        oViolation.set_action(dAction)
                        self.add_violation(oViolation)
                bCheckAlignment = False
def _analyze(self, lToi):
    """Check column alignment of identifiers that follow a comma in a
    multi-identifier signal declaration.

    Collects, per declaration, the comma position and the following
    identifier's column, then lets add_adjustments_to_dAnalysis compute a
    per-line 'adjust' value; non-zero adjustments become violations.
    Declarations with more than two identifiers (two commas) are skipped.
    """
    for oToi in lToi:
        iLine, lTokens = utils.get_toi_parameters(oToi)
        iColumn = 0
        bSignalFound = False
        bSkip = False
        dAnalysis = {}
        dTemp = {}
        for iToken, oToken in enumerate(lTokens):
            iLine = utils.increment_line_number(iLine, oToken)
            # Track the current column across the line.
            if isinstance(oToken, parser.carriage_return):
                iColumn = 0
            else:
                iColumn += len(oToken.get_value())
            bSkip = check_for_exclusions(bSkip, oToken, self.lUnless)
            if bSkip:
                continue
            bSignalFound = check_for_signal_declaration(bSignalFound,
                                                        oToken)
            if not bSignalFound:
                iComma = 0
                continue
            if isinstance(oToken, token.signal_declaration.colon):
                # End of the identifier list; keep the collected data only
                # for the two-identifier (single comma) case.
                bSignalFound = False
                if iComma == 1:
                    dAnalysis[dTemp['line_number']] = dTemp
                continue
            if isinstance(oToken, parser.comma):
                iComma += 1
                if iComma == 2:
                    # Three or more identifiers: not checked by this rule.
                    bSignalFound = False
                    continue
                dTemp = {}
                dTemp['comma_column'] = iColumn
                dTemp['comma_index'] = iToken
                dTemp['line_number'] = iLine
                # Identifier may be separated from the comma by whitespace.
                if utils.are_next_consecutive_token_types(
                        [parser.whitespace,
                         token.signal_declaration.identifier], iToken + 1,
                        lTokens):
                    dTemp['identifier_column'] = iColumn + len(
                        lTokens[iToken + 1].get_value())
                    dTemp['token_index'] = iToken + 2
                    dTemp['token_value'] = lTokens[iToken + 2].get_value()
                elif utils.are_next_consecutive_token_types(
                        [token.signal_declaration.identifier], iToken + 1,
                        lTokens):
                    dTemp['identifier_column'] = iColumn + 1
                    dTemp['token_index'] = iToken + 1
                    dTemp['token_value'] = lTokens[iToken + 1].get_value()
                else:
                    bSignalFound = False
        add_adjustments_to_dAnalysis(dAnalysis, self.compact_alignment)
        for iKey in list(dAnalysis.keys()):
            if dAnalysis[iKey]['adjust'] != 0:
                oLineTokens = oToi.extract_tokens(
                    dAnalysis[iKey]['comma_index'],
                    dAnalysis[iKey]['token_index'])
                sSolution = 'Move ' + dAnalysis[iKey]['token_value'] + ' ' + str(dAnalysis[iKey]['adjust']) + ' columns'
                oViolation = violation.New(dAnalysis[iKey]['line_number'],
                                           oLineTokens, sSolution)
                oViolation.set_action(dAnalysis[iKey])
                self.add_violation(oViolation)
        dAnalysis = {}
def _check_close_paren_new_line(self, oToi):
    """Enforce carriage-return policy before closing parentheses in an
    aggregate assignment.

    Option 'yes' requires each closing parenthesis to start its own line;
    'no' forbids it; 'ignore' disables the check.  *others* clauses and
    element assignments after the first parenthesis are skipped.
    """
    if self.close_paren_new_line == 'ignore':
        return
    iLine, lTokens = utils.get_toi_parameters(oToi)
    bAssignmentFound = False
    bOthersClause = False
    # Start just past the assignment operator and stop at the final
    # closing parenthesis.
    iToken = _find_assignment_operator(lTokens) + 1
    iStopIndex = _find_last_closing_paren(lTokens)
    bFirstParenFound = False
    while iToken < iStopIndex:
        if bFirstParenFound:
            # Skip over (others => ...) clauses and element assignments.
            iToken, bOthersClause = _classify_others(iToken, lTokens)
            if bOthersClause:
                iToken += 1
                continue
            iToken, bAssignmentFound = _classify_assignment(iToken, lTokens)
            if bAssignmentFound:
                iToken += 1
                continue
        oToken = lTokens[iToken]
        if isinstance(oToken, parser.open_parenthesis):
            bFirstParenFound = True
        if isinstance(oToken, parser.close_parenthesis):
            if utils.does_token_start_line(iToken, lTokens):
                if self.close_paren_new_line == 'no':
                    iStart = utils.find_previous_non_whitespace_token(
                        iToken - 1, lTokens)
                    # Report on the physical line of the parenthesis.
                    iErrorLine = rules_utils.number_of_carriage_returns(
                        lTokens[:iToken]) + iLine
                    sSolution = 'Move closing parenthesis to previous line.'
                    oViolation = _create_violation(oToi, iErrorLine, iStart,
                                                   iToken,
                                                   'close_paren_new_line',
                                                   'remove', sSolution)
                    self.add_violation(oViolation)
            else:
                if self.close_paren_new_line == 'yes':
                    iStart = iToken - 1
                    iErrorLine = rules_utils.number_of_carriage_returns(
                        lTokens[:iToken]) + iLine
                    sSolution = 'Move closing parenthesis to the next line.'
                    oViolation = _create_violation(oToi, iErrorLine, iStart,
                                                   iToken,
                                                   'close_paren_new_line',
                                                   'insert', sSolution)
                    self.add_violation(oViolation)
        iToken += 1
    return None