def _analyze(self, lToi):
    lNewToi = []
    for oToi in lToi:
        iLine, lTokens = utils.get_toi_parameters(oToi)
        bResetFound = False
        iStartIndex = None
        for iToken, oToken in enumerate(lTokens):
            iLine = utils.increment_line_number(iLine, oToken)
            if detect_clock_definition(iToken, oToken, lTokens):
                if bResetFound:
                    lNewToi.append(oToi.extract_tokens(iStartIndex, iToken))
                break
            if isinstance(oToken, token.if_statement.if_keyword) and oToken.get_hierarchy() == 0:
                iStartIndex = iToken
                bResetFound = True
    for oToi in lNewToi:
        iLine, lTokens = utils.get_toi_parameters(oToi)
        bInsideAssignment = False
        bAfterFound = False
        for iToken, oToken in enumerate(lTokens):
            iLine = utils.increment_line_number(iLine, oToken)
            if not bInsideAssignment:
                if detect_signal_assignment(oToken):
                    bInsideAssignment = True
                continue
            if bAfterFound:
                if detect_end_signal_assignment(oToken):
                    oNewToi = oToi.extract_tokens(iStartIndex, iToken)
                    sSolution = 'Remove *after* from signals in reset portion of a clock process'
                    oViolation = violation.New(iLine, oNewToi, sSolution)
                    self.add_violation(oViolation)
                    bInsideAssignment = False
                    bAfterFound = False
            if isinstance(oToken, token.waveform_element.after_keyword):
                if isinstance(lTokens[iToken - 1], parser.whitespace):
                    iStartIndex = iToken - 1
                else:
                    iStartIndex = iToken
                bAfterFound = True

def _analyze(self, lToi):
    for oToi in lToi:
        iLine, lTokens = utils.get_toi_parameters(oToi)
        if not utils.does_token_type_exist_in_list_of_tokens(self.oMoveToken, lTokens):
            continue
        dAction = {}
        bPassing = False
        for iToken, oToken in enumerate(lTokens):
            iLine = utils.increment_line_number(iLine, oToken)
            for oAnchorToken in self.lAnchorTokens:
                if isinstance(oToken, oAnchorToken):
                    dAction['insert'] = iToken + 1
                    sAnchorToken = oToken.get_value()
                    iAnchorLine = iLine
                    if utils.are_next_consecutive_token_types([parser.whitespace, self.oMoveToken], iToken + 1, lTokens):
                        bPassing = True
                        break
                    elif isinstance(lTokens[iToken + 1], self.oMoveToken):
                        bPassing = True
                        break
            if isinstance(oToken, self.oMoveToken):
                iAnchorLine = iLine
                dAction['move_index'] = iToken
                sSolution = 'Move "' + oToken.get_value() + '" on line ' + str(iLine) + ' to the right of "' + sAnchorToken + '" on line ' + str(iAnchorLine)
            if bPassing:
                break
        else:
            oViolation = violation.New(iAnchorLine, oToi, sSolution)
            oViolation.set_action(dAction)
            oViolation.set_remap()
            self.add_violation(oViolation)

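# A minimal standalone sketch of the for/else control flow used in _analyze
# above: the else clause runs only when the scan finishes without a break,
# i.e. only when no passing anchor/move pair was found. The list and the
# predicate here are hypothetical stand-ins, not vsg's parser tokens.
def first_failing_scan(lItems, fPasses):
    for oItem in lItems:
        if fPasses(oItem):
            break  # a passing item was found; no violation is reported
    else:
        return 'violation'  # the loop ran to completion without a break
    return 'passing'

assert first_failing_scan([1, 2, 3], lambda x: x == 2) == 'passing'
assert first_failing_scan([1, 3, 5], lambda x: x == 2) == 'violation'
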
def _analyze(self, lToi):
    for oToi in lToi:
        lTokens = oToi.get_tokens()
        if utils.find_carriage_return(lTokens) is None and self.allow_single_line:
            for oSplitToken in self.lSplitTokens:
                if utils.count_token_types_in_list_of_tokens(oSplitToken, lTokens) > 1:
                    break
            else:
                continue
        iLine = oToi.get_line_number()
        for iToken, oToken in enumerate(lTokens):
            iLine = utils.increment_line_number(iLine, oToken)
            for oSplitToken in self.lSplitTokens:
                if isinstance(oToken, oSplitToken):
                    if utils.are_next_consecutive_token_types([parser.whitespace, parser.comment], iToken + 1, lTokens):
                        continue
                    if utils.are_next_consecutive_token_types([parser.comment], iToken + 1, lTokens):
                        continue
                    if utils.are_next_consecutive_token_types([parser.carriage_return], iToken + 1, lTokens):
                        continue
                    oViolation = violation.New(iLine, oToi.extract_tokens(iToken, iToken), self.solution)
                    self.add_violation(oViolation)
                    break

def analyze(self, oFile):
    lTargetTypes = oFile.get_tokens_matching(self.lTokens)
    lTargetValues = []
    lTargetValuesLower = []
    for oTargetType in lTargetTypes:
        oToken = oTargetType.get_tokens()[0]
        lTargetValues.append(oToken.get_value())
        lTargetValuesLower.append(oToken.get_value().lower())
    oToi = oFile.get_all_tokens()
    iLine, lTokens = utils.get_toi_parameters(oToi)
    for iToken, oToken in enumerate(lTokens):
        iLine = utils.increment_line_number(iLine, oToken)
        if is_token_in_ignore_token_list(oToken, self.lIgnoreTokens):
            continue
        sTokenValue = oToken.get_value()
        sTokenValueLower = sTokenValue.lower()
        for sTargetValue, sTargetValueLower in zip(lTargetValues, lTargetValuesLower):
            if sTokenValueLower == sTargetValueLower:
                if sTokenValue != sTargetValue:
                    sSolution = 'Change "' + sTokenValue + '" to "' + sTargetValue + '"'
                    oNewToi = oToi.extract_tokens(iToken, iToken)
                    oViolation = violation.New(iLine, oNewToi, sSolution)
                    dAction = {}
                    dAction['constant'] = sTargetValue
                    dAction['found'] = sTokenValue
                    oViolation.set_action(dAction)
                    self.add_violation(oViolation)

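# A standalone sketch of the case-matching logic in analyze above: a token
# matches a declared name case-insensitively, and a fix is needed only when
# the exact spelling differs. Plain strings stand in for parser tokens.
def needs_case_fix(sTokenValue, lTargetValues):
    for sTargetValue in lTargetValues:
        if sTokenValue.lower() == sTargetValue.lower():
            return sTokenValue != sTargetValue  # same name, different case
    return False

assert needs_case_fix('WR_EN', ['wr_en', 'rd_en'])       # flagged
assert not needs_case_fix('wr_en', ['wr_en', 'rd_en'])   # exact match
assert not needs_case_fix('clk', ['wr_en', 'rd_en'])     # different name
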
def _check_first_paren_new_line(self, oToi):
    if self.first_paren_new_line == 'ignore':
        return
    iLine, lTokens = utils.get_toi_parameters(oToi)
    bSearch = False
    for iToken, oToken in enumerate(lTokens):
        iLine = utils.increment_line_number(iLine, oToken)
        if isinstance(oToken, token.constant_declaration.assignment_operator):
            iStart = iToken
            bSearch = True
        if isinstance(oToken, parser.open_parenthesis) and bSearch:
            if utils.find_carriage_return(lTokens[iStart:iToken]) is None:
                if self.first_paren_new_line == 'yes':
                    sSolution = 'Move parenthesis after assignment to the next line.'
                    oViolation = _create_violation(oToi, iLine, iToken - 1, iToken, 'first_paren_new_line', 'insert', sSolution)
                    self.add_violation(oViolation)
            else:
                if self.first_paren_new_line == 'no':
                    sSolution = 'Move parenthesis to same line as assignment operator.'
                    oViolation = _create_violation(oToi, iLine, iStart, iToken, 'first_paren_new_line', 'remove', sSolution)
                    self.add_violation(oViolation)
            break

def _analyze(self, lToi):
    if self.action == 'remove':
        for oToi in lToi:
            sSolution = self.action.capitalize() + ' ' + self.solution
            self.add_violation(violation.New(oToi.get_line_number(), oToi, sSolution))
        return
    for oToi in lToi:
        iLine, lTokens = utils.get_toi_parameters(oToi)
        if utils.does_token_type_exist_in_list_of_tokens(type(self.oInsertToken), lTokens):
            continue
        for iToken, oToken in enumerate(lTokens):
            iLine = utils.increment_line_number(iLine, oToken)
            for oSearch in self.lAnchorTokens:
                if isinstance(oToken, oSearch):
                    iIndex = iToken
                    iLineNumber = iLine
        sSolution = self.action.capitalize() + ' ' + self.solution
        oViolation = violation.New(iLineNumber, oToi.extract_tokens(iIndex, iIndex), sSolution)
        self.add_violation(oViolation)

def analyze(self, oFile):
    lToi = oFile.get_tokens_bounded_by(self.lAnchorTokens[0], self.oMoveToken)
    for oToi in lToi:
        lTokens = oToi.get_tokens()
        iInsert = 0
        bPassing = False
        iLine = oToi.get_line_number()
        dAction = {}
        for iToken, oToken in enumerate(lTokens):
            iLine = utils.increment_line_number(iLine, oToken)
            for oAnchorToken in self.lAnchorTokens:
                if isinstance(oToken, oAnchorToken):
                    dAction['insert'] = iToken + 1
                    self.solution = 'Move "' + lTokens[-1].get_value() + '" to the right of "' + oToken.get_value() + '" on line ' + str(iLine)
                    if isinstance(lTokens[iToken + 1], self.oMoveToken):
                        bPassing = True
                        break
            if bPassing:
                break
        else:
            oViolation = violation.New(iLine, oToi, self.solution)
            oViolation.set_action(dAction)
            self.add_violation(oViolation)

def analyze(self, oFile):
    lToi = oFile.get_blank_lines_above_line_starting_with_token(self.lTokens)
    for oToi in lToi:
        lTokens = oToi.get_tokens()
        iCount = 0
        iLine = 0
        for oToken in lTokens:
            if isinstance(oToken, parser.blank_line):
                iCount += 1
            iLine = utils.increment_line_number(iLine, oToken)
        bOverride = check_if_override_exists(oFile, oToi.get_line_number() - iLine, self.lOverrides)
        if bOverride:
            iCount -= 1
        if iCount > self.iAllow:
            oViolation = violation.New(oToi.get_line_number(), oToi, self.solution)
            dAction = {}
            if bOverride:
                dAction['index'] = 2 * (self.iAllow + 1)
            else:
                dAction['index'] = 2 * self.iAllow
            oViolation.set_action(dAction)
            self.add_violation(oViolation)

def _analyze(self, lToi):
    for oToi in lToi:
        lTokens = oToi.get_tokens()
        iLine = oToi.get_line_number()
        for iToken, oToken in enumerate(lTokens):
            iLine = utils.increment_line_number(iLine, oToken)
            for oMoveToken in self.lMoveTokens:
                if isinstance(oToken, oMoveToken):
                    if lTokens[iToken - 2] is not oAnchorToken or \
                            lTokens[iToken - 2] is oAnchorToken and isinstance(lTokens[iToken - 1], parser.carriage_return):
                        self.solution = 'Move "' + oToken.get_value() + '" to the right of "' + oAnchorToken.get_value() + '" on line ' + str(iMoveToLine)
                        if isinstance(lTokens[iToken + 1], parser.whitespace):
                            iRight = iToken + 1
                        else:
                            iRight = iToken
                        oViolation = violation.New(iLine, oToi.extract_tokens(iLeft, iRight), self.solution)
                        self.add_violation(oViolation)
                    break
            if not utils.token_is_whitespace_or_comment(oToken):
                iLeft = iToken + 1
                iMoveToLine = iLine
                oAnchorToken = oToken

def _analyze(self, lToi):
    for oToi in lToi:
        iLine, lTokens = utils.get_toi_parameters(oToi)
        for iToken, oToken in enumerate(lTokens):
            iLine = utils.increment_line_number(iLine, oToken)
            if isinstance(oToken, parser.blank_line):
                oNewToi = oToi.extract_tokens(iToken, iToken + 1)
                oViolation = violation.New(iLine, oNewToi, self.solution)
                self.add_violation(oViolation)

def _build_structure_list(iLine, iColumn, lTokens):
    lStructure = []
    for iToken, oToken in enumerate(lTokens):
        iLine = utils.increment_line_number(iLine, oToken)
        if isinstance(oToken, parser.blank_line):
            continue
        if isinstance(oToken, parser.carriage_return):
            iColumn = 0
            dReturn = {}
            dReturn['type'] = 'return'
            dReturn['line'] = iLine
            lStructure.append(dReturn)
            continue
        iColumn += len(oToken.get_value())
        if isinstance(oToken, parser.close_parenthesis):
            dParen = {}
            dParen['type'] = 'close'
            dParen['line'] = iLine
            dParen['column'] = iColumn
            dParen['begin_line'] = utils.does_token_start_line(iToken, lTokens)
            lStructure.append(dParen)
        if isinstance(oToken, parser.open_parenthesis):
            dParen = {}
            dParen['type'] = 'open'
            dParen['line'] = iLine
            dParen['column'] = iColumn
            lStructure.append(dParen)
        if oToken.get_value().lower() == 'when':
            dWhen = {}
            dWhen['type'] = 'when'
            dWhen['line'] = iLine
            dWhen['column'] = iColumn - 4
            dWhen['iToken'] = iToken
            lStructure.append(dWhen)
        if oToken.get_value().lower() == 'else':
            dElse = {}
            dElse['type'] = 'else'
            dElse['line'] = iLine
            dElse['column'] = iColumn - 4
            dElse['iToken'] = iToken
            lStructure.append(dElse)
    return lStructure, iLine

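# A standalone sketch of the kind of flat structure list _build_structure_list
# is assumed to produce: one record per parenthesis and carriage return, with
# line and column positions. The Tok class is a hypothetical stand-in for
# vsg's parser tokens.
class Tok:
    def __init__(self, sValue):
        self.sValue = sValue
    def get_value(self):
        return self.sValue

def build_structure(lTokens):
    lStructure = []
    iLine, iColumn = 1, 0
    for oToken in lTokens:
        if oToken.get_value() == '\n':
            lStructure.append({'type': 'return', 'line': iLine})
            iLine += 1
            iColumn = 0
            continue
        iColumn += len(oToken.get_value())
        if oToken.get_value() == '(':
            lStructure.append({'type': 'open', 'line': iLine, 'column': iColumn})
        if oToken.get_value() == ')':
            lStructure.append({'type': 'close', 'line': iLine, 'column': iColumn})
    return lStructure

lDemo = [Tok('a'), Tok('('), Tok('b'), Tok('\n'), Tok(')')]
assert build_structure(lDemo) == [
    {'type': 'open', 'line': 1, 'column': 2},
    {'type': 'return', 'line': 1},
    {'type': 'close', 'line': 2, 'column': 1},
]
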
def _analyze(self, lToi):
    for oToi in lToi:
        iLine, lTokens = utils.get_toi_parameters(oToi)
        for oToken in lTokens:
            iLine = utils.increment_line_number(iLine, oToken)
            if isinstance(oToken, token.constant_keyword):
                iKeywordLine = iLine
            if isinstance(oToken, token.assignment_operator):
                if iKeywordLine != iLine:
                    oViolation = violation.New(oToi.get_line_number(), oToi, self.solution)
                    self.add_violation(oViolation)

def find_beginning_of_process_declarative_region(iLine, lTokens):
    iReturn = 1
    iMyLine = iLine
    iReturnLine = iLine
    for iToken, oToken in enumerate(lTokens):
        iMyLine = utils.increment_line_number(iMyLine, oToken)
        if isinstance(oToken, token.close_parenthesis):
            iReturnLine = iMyLine
            iReturn = iToken + 1
        if isinstance(oToken, token.is_keyword):
            iReturnLine = iMyLine
            iReturn = iToken + 1
            break
    return iReturnLine, iReturn

def _analyze(self, lToi):
    oToi = lToi[0]
    iLine, lTokens = utils.get_toi_parameters(oToi)
    for iToken, oToken in enumerate(lTokens[:len(lTokens) - 2]):
        iLine = utils.increment_line_number(iLine, oToken)
        if oToken.get_value().lower() == 'std_logic_vector':
            if utils.are_next_consecutive_token_types([parser.whitespace, parser.open_parenthesis], iToken + 1, lTokens):
                lExtractedTokens = oToi.extract_tokens(iToken, iToken + 1)
                oViolation = violation.New(iLine, lExtractedTokens, self.solution)
                self.add_violation(oViolation)

def _build_index_dict(iLine, lTokens):
    dReturn = {}
    for iToken, oToken in enumerate(lTokens):
        iLine = utils.increment_line_number(iLine, oToken)
        if isinstance(oToken, parser.blank_line):
            continue
        if isinstance(oToken, parser.carriage_return):
            dReturn[iLine] = iToken + 1
            continue
    return dReturn

def _build_actual_indent_dict(iLine, lTokens, iFirstLineIndent):
    dReturn = {}
    dReturn[iLine] = iFirstLineIndent
    for iToken, oToken in enumerate(lTokens):
        iLine = utils.increment_line_number(iLine, oToken)
        if isinstance(oToken, parser.blank_line):
            continue
        if isinstance(oToken, parser.carriage_return):
            dReturn[iLine] = _set_indent(iToken, lTokens)
            continue
    return dReturn

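# A standalone sketch of the two line-keyed dictionaries built above:
# _build_index_dict maps each new line number to the index of its first
# token, and _build_actual_indent_dict maps each line to its leading
# whitespace width. Plain strings stand in for parser tokens here, and a
# bare '\n' marks a carriage return.
def build_index_and_indent(lValues):
    dIndex, dIndent = {}, {}
    iLine = 1
    for iToken, sValue in enumerate(lValues):
        if sValue == '\n':
            iLine += 1
            dIndex[iLine] = iToken + 1
            sNext = lValues[iToken + 1] if iToken + 1 < len(lValues) else ''
            dIndent[iLine] = len(sNext) if sNext.isspace() else 0
    return dIndex, dIndent

dIndex, dIndent = build_index_and_indent(['a', '\n', '  ', 'b', '\n', 'c'])
assert dIndex == {2: 2, 3: 5}
assert dIndent == {2: 2, 3: 0}
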
def analyze(self, oFile):
    self._print_debug_message('Analyzing rule: ' + self.unique_id)
    lToi = self._get_tokens_of_interest(oFile)
    lUpdate = []
    for oToi in lToi:
        iLine, lTokens = utils.get_toi_parameters(oToi)
        iComments = utils.count_token_types_in_list_of_tokens(parser.comment, lTokens)
        iComment = 0
        for iToken, oToken in enumerate(lTokens):
            iLine = utils.increment_line_number(iLine, oToken)
            if isinstance(oToken, parser.comment):
                iComment += 1
                if iComment == 1:
                    if not is_header(oToken.get_value()):
                        break
                elif iComment > 1 and iComment < iComments:
                    if not self.allow_indenting:
                        oToken.set_indent(0)
                    if self.comment_left is None:
                        continue
                    if isinstance(lTokens[iToken - 1], parser.whitespace):
                        if not self.allow_indenting:
                            break
                    sHeader = '--'
                    sHeader += self.comment_left
                    sComment = oToken.get_value()
                    if not sComment.startswith(sHeader):
                        sSolution = 'Comment must start with ' + sHeader
                        oViolation = violation.New(iLine, oToi, sSolution)
                        self.add_violation(oViolation)
        if not self.allow_indenting:
            lUpdate.append(violation.New(0, oToi, ''))
    if not self.allow_indenting:
        oFile.update(lUpdate)

def analyze(self, oFile):
    lToi = oFile.get_blank_lines_below_line_ending_with_token(self.lTokens)
    for oToi in lToi:
        lTokens = oToi.get_tokens()
        iCount = 0
        iLine = oToi.get_line_number()
        for oToken in lTokens:
            if isinstance(oToken, parser.blank_line):
                iCount += 1
            iLine = utils.increment_line_number(iLine, oToken)
        bOverride = check_if_override_exists(oFile, iLine, self.lOverrides)
        if bOverride:
            iCount -= 1
        if iCount > self.iAllow:
            oViolation = violation.New(oToi.get_line_number(), oToi, self.solution)
            dAction = {}
            dAction['remove'] = self.iAllow - iCount
            oViolation.set_action(dAction)
            self.add_violation(oViolation)

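# A short worked sketch of the count arithmetic above, with hypothetical
# numbers: iAllow blank lines are permitted, an override excuses one extra,
# and dAction['remove'] ends up negative, i.e. minus the number of surplus
# blank lines to delete.
iAllow = 1
iCount = 4            # blank lines actually found
bOverride = True
if bOverride:
    iCount -= 1       # one blank line is excused by the override
if iCount > iAllow:
    iRemove = iAllow - iCount
    assert iRemove == -2  # two surplus blank lines remain
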
def _analyze(self, lToi):
    if self.action == 'remove':
        for oToi in lToi:
            sSolution = self.action.capitalize() + ' ' + self.solution
            self.add_violation(violation.New(oToi.get_line_number(), oToi, sSolution))
        return
    for oToi in lToi:
        iLine, lTokens = utils.get_toi_parameters(oToi)
        bFound = False
        for oToken in lTokens:
            iLine = utils.increment_line_number(iLine, oToken)
            if isinstance(oToken, self.insert_token):
                bFound = True
                break
        if not bFound:
            sSolution = self.action.capitalize() + ' ' + self.solution
            self.add_violation(violation.New(iLine, oToi, sSolution))

def _analyze(self, lToi):
    for oToi in lToi:
        iLine, lTokens = utils.get_toi_parameters(oToi)
        lLabels = []
        for iToken, oToken in enumerate(lTokens):
            iLine = utils.increment_line_number(iLine, oToken)
            if manage_labels(oToken, lLabels):
                continue
            if isinstance(oToken, token.for_generate_statement.end_generate_keyword):
                if not utils.are_next_consecutive_token_types_ignoring_whitespace([token.for_generate_statement.end_generate_label], iToken + 1, lTokens):
                    oNewToi = oToi.extract_tokens(iToken, iToken)
                    dAction = {}
                    dAction['label'] = token.for_generate_statement.end_generate_label(lLabels[-1].get_value())
                    sSolution = 'Add label ' + lLabels[-1].get_value()
                    oViolation = violation.New(oNewToi.get_line_number(), oNewToi, sSolution)
                    oViolation.set_action(dAction)
                    self.add_violation(oViolation)
                continue
            if isinstance(oToken, token.if_generate_statement.end_generate_keyword):
                if not utils.are_next_consecutive_token_types_ignoring_whitespace([token.if_generate_statement.end_generate_label], iToken + 1, lTokens):
                    oNewToi = oToi.extract_tokens(iToken, iToken)
                    dAction = {}
                    dAction['label'] = token.if_generate_statement.end_generate_label(lLabels[-1].get_value())
                    sSolution = 'Add label ' + lLabels[-1].get_value()
                    oViolation = violation.New(oNewToi.get_line_number(), oNewToi, sSolution)
                    oViolation.set_action(dAction)
                    self.add_violation(oViolation)
                continue
            if isinstance(oToken, token.case_generate_statement.end_generate_keyword):
                if not utils.are_next_consecutive_token_types_ignoring_whitespace([token.case_generate_statement.end_generate_label], iToken + 1, lTokens):
                    oNewToi = oToi.extract_tokens(iToken, iToken)
                    dAction = {}
                    dAction['label'] = token.case_generate_statement.end_generate_label(lLabels[-1].get_value())
                    sSolution = 'Add label ' + lLabels[-1].get_value()
                    oViolation = violation.New(oNewToi.get_line_number(), oNewToi, sSolution)
                    oViolation.set_action(dAction)
                    self.add_violation(oViolation)
                continue

def analyze(self, oFile):
    lToi = oFile.get_tokens_bounded_by(self.lAnchorTokens[0], self.oEndToken)
    for oToi in lToi:
        iLine, lTokens = utils.get_toi_parameters(oToi)
        if utils.does_token_type_exist_in_list_of_tokens(type(self.oInsertToken), lTokens):
            continue
        dAction = {}
        for iToken, oToken in enumerate(lTokens):
            iLine = utils.increment_line_number(iLine, oToken)
            for oSearch in self.lAnchorTokens:
                if isinstance(oToken, oSearch):
                    iIndex = iToken
                    iLineNumber = iLine
                    sToken = oToken.get_value()
        sSolution = 'Add *is* keyword to the right of ' + sToken
        oViolation = violation.New(iLineNumber, oToi.extract_tokens(iIndex, iIndex), sSolution)
        self.add_violation(oViolation)

def analyze(self, oFile):
    self._print_debug_message('Analyzing rule: ' + self.unique_id)
    lTargetTypes = oFile.get_tokens_matching_in_range_bounded_by_tokens(self.lTokens, self.oStart, self.oEnd)
    lTargetValues = []
    lTargetValuesLower = []
    for oTargetType in lTargetTypes:
        oToken = oTargetType.get_tokens()[0]
        lTargetValues.append(oToken.get_value())
        lTargetValuesLower.append(oToken.get_value().lower())
    lToi = oFile.get_tokens_bounded_by(self.oRegionStart, self.oRegionEnd)
    for oToi in lToi:
        iLine, lTokens = utils.get_toi_parameters(oToi)
        for iToken, oToken in enumerate(lTokens):
            iLine = utils.increment_line_number(iLine, oToken)
            if is_token_in_ignore_token_list(oToken, self.lIgnoreTokens):
                continue
            sTokenValue = oToken.get_value()
            sTokenValueLower = sTokenValue.lower()
            for sTargetValue, sTargetValueLower in zip(lTargetValues, lTargetValuesLower):
                if sTokenValueLower == sTargetValueLower:
                    if sTokenValue != sTargetValue:
                        sSolution = 'Change "' + sTokenValue + '" to "' + sTargetValue + '"'
                        oNewToi = oToi.extract_tokens(iToken, iToken)
                        oViolation = violation.New(iLine, oNewToi, sSolution)
                        dAction = {}
                        dAction['constant'] = sTargetValue
                        dAction['found'] = sTokenValue
                        oViolation.set_action(dAction)
                        self.add_violation(oViolation)

def analyze(self, oFile):
    self._print_debug_message('Analyzing rule: ' + self.unique_id)
    lToi = self._get_tokens_of_interest(oFile)
    lUpdate = []
    for oToi in lToi:
        iLine, lTokens = utils.get_toi_parameters(oToi)
        iComments = utils.count_token_types_in_list_of_tokens(parser.comment, lTokens)
        iComment = 0
        for iToken, oToken in enumerate(lTokens):
            iLine = utils.increment_line_number(iLine, oToken)
            if isinstance(oToken, parser.comment):
                iComment += 1
                if iComment == iComments:
                    if not self.allow_indenting:
                        if isinstance(lTokens[iToken - 1], parser.whitespace):
                            break
                        else:
                            oToken.set_indent(0)
                    iWhitespace = self.indentSize * oToken.get_indent()
                    sFooter = '--'
                    if self.footer_left is not None:
                        sFooter += self.footer_left
                        iFooter_left = len(self.footer_left)
                    else:
                        iFooter_left = 0
                    if self.footer_string is None:
                        sFooter += self.footer_left_repeat * (self.max_footer_column - iWhitespace - len(sFooter))
                    elif self.footer_alignment == 'center':
                        iLength = int((self.max_footer_column - iWhitespace - len(self.footer_string)) / 2) - iFooter_left - 2
                        sFooter += self.footer_left_repeat * iLength
                        sFooter += self.footer_string
                        sFooter += self.footer_right_repeat * (self.max_footer_column - len(sFooter))
                    elif self.footer_alignment == 'left':
                        sFooter += self.footer_left_repeat
                        sFooter += self.footer_string
                        iLength = self.max_footer_column - iWhitespace - len(sFooter)
                        sFooter += self.footer_right_repeat * (self.max_footer_column - len(sFooter))
                    elif self.footer_alignment == 'right':
                        iLength = self.max_footer_column - iWhitespace - len(sFooter) - len(self.footer_string) - 1
                        sFooter += self.footer_left_repeat * iLength
                        sFooter += self.footer_string
                        sFooter += self.footer_right_repeat
                    sComment = oToken.get_value()
                    try:
                        if is_footer(sComment):
                            if not self.allow_indenting:
                                oToken.set_indent(0)
                            if sComment != sFooter:
                                sSolution = 'Change block comment footer to : ' + sFooter
                                oViolation = violation.New(iLine, oToi, sSolution)
                                self.add_violation(oViolation)
                            break
                    except IndexError:
                        break
        if not self.allow_indenting:
            lUpdate.append(violation.New(0, oToi, ''))
    if not self.allow_indenting:
        oFile.update(lUpdate)

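# A standalone sketch of the 'center' footer construction above, using
# hypothetical option values (footer_left is None, so iFooter_left is 0,
# and '-' stands in for both repeat characters); it shows how the repeat
# fills pad the comment out to max_footer_column.
iMaxFooterColumn = 20
iWhitespace = 0
iFooterLeft = 0
sFooterString = 'END'

sFooter = '--'
iLength = int((iMaxFooterColumn - iWhitespace - len(sFooterString)) / 2) - iFooterLeft - 2
sFooter += '-' * iLength                              # footer_left_repeat fill
sFooter += sFooterString
sFooter += '-' * (iMaxFooterColumn - len(sFooter))    # footer_right_repeat fill
assert sFooter == '--------END---------'
assert len(sFooter) == iMaxFooterColumn
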
def analyze(self, oFile):
    lPreToi = oFile.get_tokens_bounded_by(self.oStart, self.oEnd)
    lToi = []
    for oToi in lPreToi:
        iLine, lTokens = utils.get_toi_parameters(oToi)
        bInsideClockDef = False
        for iToken, oToken in enumerate(lTokens):
            iLine = utils.increment_line_number(iLine, oToken)
            if not bInsideClockDef:
                if detect_clock_definition(iToken, oToken, lTokens):
                    bInsideClockDef = True
                    iStartIndex = iToken
                continue
            if isinstance(oToken, token.if_statement.semicolon) and oToken.get_hierarchy() == 0:
                lToi.append(oToi.extract_tokens(iStartIndex, iToken))
                bInsideClockDef = False
                continue
    ### jcl - need to figure out how to do this better without copying
    dAnalysis = {}
    for oToi in lToi:
        lTokens = oToi.get_tokens()
        iLine = oToi.get_line_number()
        iColumn = 0
        bTokenFound = False
        iToken = -1
        for iIndex in range(0, len(lTokens)):
            iToken += 1
            oToken = lTokens[iIndex]
            if not bTokenFound:
                for oSearch in self.lTokens:
                    if isinstance(oToken, oSearch):
                        bTokenFound = True
                        dAnalysis[iLine] = {}
                        dAnalysis[iLine]['token_column'] = iColumn
                        dAnalysis[iLine]['token_index'] = iToken
                        dAnalysis[iLine]['line_number'] = iLine
                        if isinstance(lTokens[iIndex - 1], parser.whitespace):
                            dAnalysis[iLine]['left_column'] = iColumn - len(lTokens[iIndex - 1].get_value())
                        else:
                            dAnalysis[iLine]['left_column'] = iColumn
                        break
            iColumn += len(oToken.get_value())
            if isinstance(oToken, parser.carriage_return):
                iLine += 1
                iColumn = 0
                bTokenFound = False
                iToken = -1
                if self.comment_line_ends_group:
                    if utils.are_next_consecutive_token_types([parser.whitespace, parser.comment], iIndex + 1, lTokens) or \
                            utils.are_next_consecutive_token_types([parser.comment], iIndex + 1, lTokens):
                        add_adjustments_to_dAnalysis(dAnalysis, self.compact_alignment)
                        for iKey in list(dAnalysis.keys()):
                            if dAnalysis[iKey]['adjust'] != 0:
                                oLineTokens = oFile.get_tokens_from_line(iKey)
                                oViolation = violation.New(oLineTokens.get_line_number(), oLineTokens, self.solution)
                                oViolation.set_action(dAnalysis[iKey])
                                self.add_violation(oViolation)
                        dAnalysis = {}
                if self.blank_line_ends_group:
                    if utils.are_next_consecutive_token_types([parser.blank_line], iIndex + 1, lTokens):
                        add_adjustments_to_dAnalysis(dAnalysis, self.compact_alignment)
                        for iKey in list(dAnalysis.keys()):
                            if dAnalysis[iKey]['adjust'] != 0:
                                oLineTokens = oFile.get_tokens_from_line(iKey)
                                oViolation = violation.New(oLineTokens.get_line_number(), oLineTokens, self.solution)
                                oViolation.set_action(dAnalysis[iKey])
                                self.add_violation(oViolation)
                        dAnalysis = {}
        add_adjustments_to_dAnalysis(dAnalysis, self.compact_alignment)
        for iKey in list(dAnalysis.keys()):
            if dAnalysis[iKey]['adjust'] != 0:
                oLineTokens = oFile.get_tokens_from_line(iKey)
                oViolation = violation.New(oLineTokens.get_line_number(), oLineTokens, self.solution)
                oViolation.set_action(dAnalysis[iKey])
                self.add_violation(oViolation)
        dAnalysis = {}

def analyze(self, oFile):
    lToi = self._get_tokens_of_interest(oFile)
    for oToi in lToi:
        iLine, lTokens = utils.get_toi_parameters(oToi)
        # print('='*5 + str(iLine) + '='*70)
        iFirstLine, iFirstLineIndent = _get_first_line_info(iLine, oFile)
        iAssignColumn = oFile.get_column_of_token_index(oToi.get_start_index())
        iColumn = iAssignColumn
        dActualIndent = {}
        dActualIndent[iLine] = iFirstLineIndent
        lParens = []
        dIndex = {}
        bStartsWithParen = _starts_with_paren(lTokens)
        bSkipCommentLine = False
        for iToken, oToken in enumerate(lTokens):
            iLine = utils.increment_line_number(iLine, oToken)
            if isinstance(oToken, parser.blank_line):
                continue
            if bSkipCommentLine:
                if not isinstance(oToken, parser.carriage_return):
                    continue
            if isinstance(oToken, parser.carriage_return):
                iColumn = 0
                bSkipCommentLine = rules_utils.does_line_start_with_comment(lTokens[iToken + 1:iToken + 3])
                if bSkipCommentLine:
                    dActualIndent[iLine] = None
                else:
                    dActualIndent[iLine] = _set_indent(iToken, lTokens)
                dIndex[iLine] = iToken + 1
                continue
            iColumn += len(oToken.get_value())
            if isinstance(oToken, parser.close_parenthesis):
                dParen = {}
                dParen['type'] = 'close'
                dParen['line'] = iLine
                dParen['column'] = iColumn
                dParen['begin_line'] = utils.does_token_start_line(iToken, lTokens)
                lParens.append(dParen)
            if isinstance(oToken, parser.open_parenthesis):
                dParen = {}
                dParen['type'] = 'open'
                dParen['line'] = iLine
                dParen['column'] = iColumn
                lParens.append(dParen)
        iLastLine = iLine
        if iFirstLine == iLastLine:
            continue
        iFirstTokenLength = len(lTokens[0].get_value())
        if self.align_paren == 'no' and self.align_left == 'yes':
            dExpectedIndent = _analyze_align_paren_no(iFirstLine, iLastLine, lParens, self.indentSize, dActualIndent, bStartsWithParen)
        if self.align_paren == 'yes' and self.align_left == 'no':
            dExpectedIndent = _analyze_align_paren_yes_align_left_no(iFirstLine, iLastLine, lParens, dActualIndent, self.indentSize, bStartsWithParen, iAssignColumn, iFirstTokenLength)
        if self.align_paren == 'yes' and self.align_left == 'yes':
            dExpectedIndent = _analyze_align_paren_yes_align_left_yes(iFirstLine, iLastLine, lParens, dActualIndent, self.indentSize, bStartsWithParen, iAssignColumn)
        if self.align_paren == 'no' and self.align_left == 'no':
            dExpectedIndent = _analyze_align_paren_no_align_left_no(iFirstLine, iLastLine, lParens, dActualIndent, self.indentSize, bStartsWithParen, iAssignColumn)
        # print(f'Actual = {dActualIndent}')
        # print(f'Expect = {dExpectedIndent}')
        # print(f'Index  = {dIndex}')
        for iLine in range(iFirstLine, iLastLine + 1):
            if dActualIndent[iLine] is None:
                continue
            if indents_match(dActualIndent[iLine], dExpectedIndent[iLine]):
                continue
            oViolation = build_violation(iLine, oToi, iToken, dExpectedIndent, dIndex, dActualIndent)
            self.add_violation(oViolation)

def _analyze(self, lToi):
    for oToi in lToi:
        iLine, lTokens = utils.get_toi_parameters(oToi)
        iColumn = 0
        bSignalFound = False
        bSkip = False
        dAnalysis = {}
        dTemp = {}
        for iToken, oToken in enumerate(lTokens):
            iLine = utils.increment_line_number(iLine, oToken)
            if isinstance(oToken, parser.carriage_return):
                iColumn = 0
            else:
                iColumn += len(oToken.get_value())
            bSkip = check_for_exclusions(bSkip, oToken, self.lUnless)
            if bSkip:
                continue
            bSignalFound = check_for_signal_declaration(bSignalFound, oToken)
            if not bSignalFound:
                iComma = 0
                continue
            if isinstance(oToken, token.signal_declaration.colon):
                bSignalFound = False
                if iComma == 1:
                    dAnalysis[dTemp['line_number']] = dTemp
                continue
            if isinstance(oToken, parser.comma):
                iComma += 1
                if iComma == 2:
                    bSignalFound = False
                    continue
                dTemp = {}
                dTemp['comma_column'] = iColumn
                dTemp['comma_index'] = iToken
                dTemp['line_number'] = iLine
                if utils.are_next_consecutive_token_types([parser.whitespace, token.signal_declaration.identifier], iToken + 1, lTokens):
                    dTemp['identifier_column'] = iColumn + len(lTokens[iToken + 1].get_value())
                    dTemp['token_index'] = iToken + 2
                    dTemp['token_value'] = lTokens[iToken + 2].get_value()
                elif utils.are_next_consecutive_token_types([token.signal_declaration.identifier], iToken + 1, lTokens):
                    dTemp['identifier_column'] = iColumn + 1
                    dTemp['token_index'] = iToken + 1
                    dTemp['token_value'] = lTokens[iToken + 1].get_value()
                else:
                    bSignalFound = False
        add_adjustments_to_dAnalysis(dAnalysis, self.compact_alignment)
        for iKey in list(dAnalysis.keys()):
            if dAnalysis[iKey]['adjust'] != 0:
                oLineTokens = oToi.extract_tokens(dAnalysis[iKey]['comma_index'], dAnalysis[iKey]['token_index'])
                sSolution = 'Move ' + dAnalysis[iKey]['token_value'] + ' ' + str(dAnalysis[iKey]['adjust']) + ' columns'
                oViolation = violation.New(dAnalysis[iKey]['line_number'], oLineTokens, sSolution)
                oViolation.set_action(dAnalysis[iKey])
                self.add_violation(oViolation)
        dAnalysis = {}

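# A minimal sketch of what add_adjustments_to_dAnalysis is assumed to do for
# compact alignment: pick a common target column from the widest comma column
# and record each line's delta in 'adjust'. The target formula here is a
# hypothetical stand-in; only the dictionary shape mirrors the code above.
def add_adjustments(dAnalysis):
    iTarget = max(d['comma_column'] for d in dAnalysis.values()) + 2
    for d in dAnalysis.values():
        d['adjust'] = iTarget - d['identifier_column']

dAnalysis = {
    1: {'comma_column': 10, 'identifier_column': 12},
    2: {'comma_column': 14, 'identifier_column': 15},
}
add_adjustments(dAnalysis)
assert dAnalysis[1]['adjust'] == 4   # move right to line up with line 2
assert dAnalysis[2]['adjust'] == 1
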
def analyze(self, oFile):
    lToi = self._get_tokens_of_interest(oFile)
    for oToi in lToi:
        iLine, lTokens = utils.get_toi_parameters(oToi)
        # print('='*5 + str(iLine) + '='*70)
        iFirstLine, iFirstLineIndent = _get_first_line_info(iLine, oFile)
        iFirstColumn, iNextColumn, iLastColumn = _find_first_column(oFile, oToi, self.align_left, iFirstLineIndent, self.indentSize)
        iAssignColumn = oFile.get_column_of_token_index(oToi.get_start_index())
        iColumn = iAssignColumn
        dActualIndent = {}
        dActualIndent[iLine] = iFirstLineIndent
        lParens = []
        dIndex = {}
        bStartsWithParen = _starts_with_paren(lTokens)
        for iToken, oToken in enumerate(lTokens):
            iLine = utils.increment_line_number(iLine, oToken)
            if isinstance(oToken, parser.blank_line):
                continue
            if isinstance(oToken, parser.carriage_return):
                iColumn = 0
                dActualIndent[iLine] = _set_indent(iToken, lTokens)
                dIndex[iLine] = iToken + 1
                continue
            iColumn += len(oToken.get_value())
            if isinstance(oToken, parser.close_parenthesis):
                dParen = {}
                dParen['type'] = 'close'
                dParen['line'] = iLine
                dParen['column'] = iColumn
                dParen['begin_line'] = utils.does_token_start_line(iToken, lTokens)
                lParens.append(dParen)
            if isinstance(oToken, parser.open_parenthesis):
                dParen = {}
                dParen['type'] = 'open'
                dParen['line'] = iLine
                dParen['column'] = iColumn
                lParens.append(dParen)
        iLastLine = iLine
        if iFirstLine == iLastLine:
            continue
        if not self.align_paren and self.align_left:
            dExpectedIndent = _analyze_align_paren_false(iFirstLine, iLastLine, lParens, self.indentSize, dActualIndent, bStartsWithParen)
        if self.align_paren and not self.align_left:
            dExpectedIndent = _analyze_align_paren_true(iFirstLine, iLastLine, lParens, dActualIndent, self.indentSize, bStartsWithParen, iAssignColumn)
        if self.align_paren and self.align_left:
            dExpectedIndent = _analyze_align_paren_true_align_left_true(iFirstLine, iLastLine, lParens, dActualIndent, self.indentSize, bStartsWithParen, iAssignColumn)
        # print(f'Actual = {dActualIndent}')
        # print(f'Expect = {dExpectedIndent}')
        # print(f'Index  = {dIndex}')
        for iLine in range(iFirstLine, iLastLine + 1):
            if dActualIndent[iLine] == dExpectedIndent[iLine]:
                continue
            dAction = {}
            dAction['line'] = iLine
            dAction['column'] = dExpectedIndent[iLine]
            if dActualIndent[iLine] > 0:
                dAction['action'] = 'adjust'
            else:
                dAction['action'] = 'insert'
            sSolution = 'Adjust indent to column ' + str(dExpectedIndent[iLine])
            iToken = dIndex[iLine]
            oViolation = violation.New(iLine, oToi.extract_tokens(iToken, iToken), sSolution)
            oViolation.set_action(dAction)
            self.add_violation(oViolation)

def analyze(self, oFile):
    lToi = []
    for lTokenPair in self.lTokenPairs:
        aToi = oFile.get_tokens_bounded_by(lTokenPair[0], lTokenPair[1], bExcludeLastToken=self.bExcludeLastToken)
        lToi = utils.combine_two_token_class_lists(lToi, aToi)
    for oToi in lToi:
        iLine, lTokens = utils.get_toi_parameters(oToi)
        if utils.are_next_consecutive_token_types_ignoring_whitespace([parser.open_parenthesis], 1, lTokens):
            continue
        iStartColumn = calculate_start_column(oFile, oToi)
        lColumn = []
        lColumn.append(iStartColumn)
        bCheckAlignment = False
        iFirstColumn = oFile.get_column_of_token_index(oToi.get_start_index())
        iColumn = iFirstColumn
        iPreviousColumn = 0
        iIndent = 0
        # print('-'*80)
        for iToken, oToken in enumerate(lTokens):
            iLine = utils.increment_line_number(iLine, oToken)
            if isinstance(oToken, parser.carriage_return):
                bCheckAlignment = True
                iPreviousColumn = lColumn[-1]
                iColumn = 0
                if isinstance(lTokens[iToken + 1], parser.whitespace):
                    iIndent = len(lTokens[iToken + 1].get_value())
                else:
                    iIndent = 0
                continue
            if isinstance(oToken, parser.blank_line):
                bCheckAlignment = False
                continue
            iColumn += len(oToken.get_value())
            if isinstance(oToken, parser.open_parenthesis):
                lColumn.append(iColumn + iPreviousColumn - iIndent)
            if isinstance(oToken, parser.close_parenthesis):
                lColumn.pop()
            if bCheckAlignment:
                if isinstance(oToken, parser.whitespace):
                    if len(oToken.get_value()) != lColumn[-1]:
                        dAction = {}
                        dAction['line'] = iLine
                        dAction['column'] = lColumn[-1]
                        dAction['action'] = 'adjust'
                        dAction['indent'] = iIndent
                        dAction['previous'] = iPreviousColumn
                        oViolation = violation.New(iLine, oToi.extract_tokens(iToken, iToken), self.solution)
                        oViolation.set_action(dAction)
                        self.add_violation(oViolation)
                        # print(dAction)
                else:
                    if lColumn != 0:
                        dAction = {}
                        if isinstance(oToken, parser.open_parenthesis):
                            dAction['column'] = lColumn[-2]
                        else:
                            dAction['column'] = lColumn[-1]
                        dAction['action'] = 'insert'
                        oViolation = violation.New(iLine, oToi.extract_tokens(iToken, iToken), self.solution)
                        oViolation.set_action(dAction)
                        self.add_violation(oViolation)
                bCheckAlignment = False

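# A standalone sketch of the lColumn stack discipline used above: the column
# of each open parenthesis is pushed and each close parenthesis pops, so
# lColumn[-1] is always the alignment column of the innermost open group.
# Plain characters stand in for parser tokens.
def innermost_columns(sText, iStartColumn=0):
    lColumn = [iStartColumn]
    lTrace = []
    iColumn = 0
    for sChar in sText:
        iColumn += 1
        if sChar == '(':
            lColumn.append(iColumn)
        if sChar == ')':
            lColumn.pop()
        lTrace.append(lColumn[-1])
    return lTrace

#                               a  (  b  (  c  )  d  )
assert innermost_columns('a(b(c)d)') == [0, 2, 2, 4, 4, 2, 2, 0]
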
def analyze(self, oFile):
    lToi = oFile.get_tokens_bounded_by(self.lTokenPairs[0][0], self.lTokenPairs[0][1])
    for oToi in lToi:
        iLine, lTokens = utils.get_toi_parameters(oToi)
        iStartColumn = calculate_start_column(oFile, oToi)
        lColumn = []
        lColumn.append(iStartColumn)
        iColumn = oFile.get_column_of_token_index(oToi.get_start_index())
        iIndent = 0
        bWaveform = True
        bCondition = False
        iPreviousColumn = 0
        for iToken, oToken in enumerate(lTokens):
            iLine = utils.increment_line_number(iLine, oToken)
            if isinstance(oToken, parser.carriage_return):
                iPreviousColumn = lColumn[-1]
                iColumn = 0
                iIndent = calculate_indent(iToken, lTokens)
                oNextToken = lTokens[iToken + 1]
                if bWaveform:
                    iAdjustIndex = -1
                    if isinstance(oNextToken, parser.whitespace):
                        if len(oNextToken.get_value()) != iStartColumn:
                            oViolation = build_violation(iLine, iStartColumn, 'adjust', oToi, iToken)
                            self.add_violation(oViolation)
                    else:
                        oViolation = build_violation(iLine, iStartColumn, 'insert', oToi, iToken)
                        self.add_violation(oViolation)
                if bCondition:
                    if isinstance(oNextToken, parser.whitespace):
                        oSecondToken = lTokens[iToken + 2]
                        if isinstance(oSecondToken, parser.close_parenthesis):
                            iAdjustIndex = -2
                        else:
                            iAdjustIndex = -1
                        if len(oNextToken.get_value()) != lColumn[iAdjustIndex]:
                            oViolation = build_violation(iLine, lColumn[iAdjustIndex], 'adjust', oToi, iToken)
                            self.add_violation(oViolation)
                    else:
                        if isinstance(oNextToken, parser.close_parenthesis):
                            iAdjustIndex = -2
                        else:
                            iAdjustIndex = -1
                        oViolation = build_violation(iLine, lColumn[iAdjustIndex], 'insert', oToi, iToken)
                        self.add_violation(oViolation)
                continue
            iColumn += len(oToken.get_value())
            if isinstance(oToken, token.conditional_waveforms.when_keyword):
                bWaveform = False
                bCondition = True
                lColumn.append(iColumn + 1)
            if isinstance(oToken, token.conditional_waveforms.else_keyword):
                bWaveform = True
                bCondition = False
            if isinstance(oToken, parser.open_parenthesis):
                lColumn.append(iColumn + iPreviousColumn - iIndent)
            if isinstance(oToken, parser.close_parenthesis):
                lColumn.pop()