def _analyze(self, lToi):
    for oToi in lToi:
        lTokens = oToi.get_tokens()
        iLine = oToi.get_line_number()
        for iToken, oToken in enumerate(lTokens):
            if isinstance(oToken, parser.carriage_return):
                iLine += 1
            for oSearchToken in self.lTokens:
                if utils.are_next_consecutive_token_types([parser.whitespace, oSearchToken], iToken + 1, lTokens) or \
                        utils.are_next_consecutive_token_types([oSearchToken], iToken + 1, lTokens):
                    oViolation = violation.New(iLine, oToi, self.solution)
                    dAction = {}
                    dAction['remove_to_index'] = iToken + 1
                    oViolation.set_action(dAction)
                    self.add_violation(oViolation)
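# Hedged sketch, for illustration only: a minimal stand-in showing how the
# utils.are_next_consecutive_token_types calls above appear to behave. The
# real helper lives in the project's utils module and may differ; the
# "_sketch" suffix marks this as an assumption.
def are_next_consecutive_token_types_sketch(lTypes, iIndex, lTokens):
    # True when lTokens[iIndex:] starts with exactly the given token types,
    # one type per token, in order.
    if iIndex + len(lTypes) > len(lTokens):
        return False
    return all(isinstance(oToken, oType) for oToken, oType in zip(lTokens[iIndex:], lTypes))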
def _analyze(self, lToi):
    for oToi in lToi:
        lTokens = oToi.get_tokens()
        for iToken, oToken in enumerate(lTokens):
            for oSearchToken in self.lTokens:
                if isinstance(oToken, oSearchToken):
                    oViolation = violation.New(oToi.get_line_number(), oToi, self.solution)
                    dAction = {}
                    # Include the preceding whitespace in the action when present.
                    if isinstance(lTokens[iToken - 1], parser.whitespace):
                        dAction['index'] = iToken - 1
                    else:
                        dAction['index'] = iToken
                    oViolation.set_action(dAction)
                    self.add_violation(oViolation)
                    break
def _analyze(self, lToi):
    for oToi in lToi:
        lTokens = oToi.get_tokens()
        # Blank lines and comment-only lines are compliant.
        if utils.are_next_consecutive_token_types([parser.carriage_return], 1, lTokens):
            continue
        if utils.are_next_consecutive_token_types([parser.whitespace, parser.comment], 1, lTokens):
            continue
        if utils.are_next_consecutive_token_types([parser.comment], 1, lTokens):
            continue
        self.add_violation(violation.New(oToi.get_line_number(), oToi, self.solution))
def _analyze_no_code(self, lToi, lAllowTokens):
    for oToi in lToi:
        lTokens = oToi.get_tokens()
        if _is_allowed_token(lAllowTokens, lTokens):
            continue
        # Blank lines and comment-only lines do not count as code.
        if len(lTokens) == 1:
            if isinstance(lTokens[0], (parser.blank_line, parser.comment)):
                continue
        elif len(lTokens) == 2:
            if isinstance(lTokens[0], parser.whitespace) and isinstance(lTokens[1], parser.comment):
                continue
        oViolation = violation.New(oToi.get_line_number(), oToi, self.solution)
        dAction = {}
        dAction['action'] = 'Insert'
        oViolation.set_action(dAction)
        self.add_violation(oViolation)
def _analyze(self, lToi):
    lNewToi = []
    # First pass: extract the reset portion of each clocked process, i.e. the
    # tokens from the top-level *if* keyword up to the clock definition.
    for oToi in lToi:
        iLine, lTokens = utils.get_toi_parameters(oToi)
        bResetFound = False
        for iToken, oToken in enumerate(lTokens):
            iLine = utils.increment_line_number(iLine, oToken)
            if detect_clock_definition(iToken, oToken, lTokens):
                if bResetFound:
                    lNewToi.append(oToi.extract_tokens(iStartIndex, iToken))
                break
            if isinstance(oToken, token.if_statement.if_keyword) and oToken.get_hierarchy() == 0:
                iStartIndex = iToken
                bResetFound = True
    # Second pass: flag *after* keywords inside signal assignments in the
    # reset portion.
    for oToi in lNewToi:
        iLine, lTokens = utils.get_toi_parameters(oToi)
        bInsideAssignment = False
        bAfterFound = False
        for iToken, oToken in enumerate(lTokens):
            iLine = utils.increment_line_number(iLine, oToken)
            if not bInsideAssignment:
                if detect_signal_assignment(oToken):
                    bInsideAssignment = True
                continue
            if bAfterFound:
                if detect_end_signal_assignment(oToken):
                    oNewToi = oToi.extract_tokens(iStartIndex, iToken)
                    sSolution = 'Remove *after* from signals in reset portion of a clock process'
                    oViolation = violation.New(iLine, oNewToi, sSolution)
                    self.add_violation(oViolation)
                    bInsideAssignment = False
                    bAfterFound = False
            if isinstance(oToken, token.waveform_element.after_keyword):
                if isinstance(lTokens[iToken - 1], parser.whitespace):
                    iStartIndex = iToken - 1
                else:
                    iStartIndex = iToken
                bAfterFound = True
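# For illustration only (an assumption inferred from the solution text above):
# a VHDL fragment this rule targets. The *after* delay in the reset branch of
# a clocked process would be flagged:
#
#     process (clk) begin
#       if rst = '1' then
#         q <= '0' after 1 ns;        -- violation: remove "after 1 ns"
#       elsif rising_edge(clk) then
#         q <= d;
#       end if;
#     end process;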
def _analyze_require_blank_line(self, lToi):
    for oToi in lToi:
        lTokens = oToi.get_tokens()
        if not are_there_process_declarative_items(lTokens):
            continue
        iLine, iSearch = find_beginning_of_process_declarative_region(oToi.get_line_number(), lTokens)
        if does_a_blank_line_exist(iSearch, lTokens):
            continue
        dAction = {}
        dAction['action'] = 'Insert'
        dAction['index'] = find_carriage_return(iSearch, lTokens) + 1
        oViolation = violation.New(iLine, oToi, self.solution)
        oViolation.set_action(dAction)
        self.add_violation(oViolation)
def _analyze(self, lToi):
    lPrefixLower = [sPrefix.lower() for sPrefix in self.prefixes]
    for oToi in lToi:
        lTokens = oToi.get_tokens()
        sToken = lTokens[0].get_value().lower()
        bValid = False
        for sPrefix in lPrefixLower:
            if sToken.startswith(sPrefix):
                bValid = True
                break
        if not bValid:
            sSolution = 'Prefix ' + lTokens[0].get_value() + ' with one of the following: ' + ', '.join(self.prefixes)
            oViolation = violation.New(oToi.get_line_number(), oToi, sSolution)
            self.add_violation(oViolation)
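# For illustration: with self.prefixes = ['g_', 'c_'] (hypothetical values),
# an identifier "width" fails the check and yields the solution text
# 'Prefix width with one of the following: g_, c_'.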
def analyze(self, oFile):
    lToi = oFile.get_interface_elements_between_tokens(self.oStart, self.oEnd)
    for oToi in lToi:
        lTokens = oToi.get_tokens()
        for iToken, oToken in enumerate(lTokens):
            for oSearchToken in self.lTokens:
                if isinstance(oToken, oSearchToken):
                    oViolation = violation.New(oToi.get_line_number(), oToi, self.solution)
                    dAction = {}
                    if isinstance(lTokens[iToken - 1], parser.whitespace):
                        dAction['index'] = iToken - 1
                    else:
                        dAction['index'] = iToken
                    oViolation.set_action(dAction)
                    self.add_violation(oViolation)
                    break
def analyze(self, oFile):
    lKeywords = oFile.get_tokens_matching([token.process_statement.process_keyword])
    lLabels = oFile.get_tokens_matching([token.process_statement.process_label])
    iPreviousIndex = 0
    for oKeyword in lKeywords:
        iCurrentIndex = oKeyword.get_start_index()
        # for/else: the else runs only when no label falls between the previous
        # process keyword and this one, i.e. the process is unlabeled.
        for oLabel in lLabels:
            iLabelIndex = oLabel.get_start_index()
            if iPreviousIndex < iLabelIndex < iCurrentIndex:
                break
        else:
            oViolation = violation.New(oKeyword.get_line_number(), oKeyword, self.solution)
            self.add_violation(oViolation)
        iPreviousIndex = iCurrentIndex
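# The for/else idiom above (and in several rules below) is easy to misread.
# A minimal, self-contained illustration of the plain-Python semantics,
# unrelated to any project API: the else clause runs only when the loop
# completes without hitting break.
def first_multiple_of(iN, lCandidates):
    for iValue in lCandidates:
        if iValue % iN == 0:
            break
    else:
        return None  # loop never broke: nothing matched
    return iValue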
def _analyze(self, lToi):
    for oToi in lToi:
        lTokens = oToi.get_tokens()
        if utils.are_next_consecutive_token_types([parser.carriage_return], 1, lTokens):
            continue
        if utils.are_next_consecutive_token_types([parser.whitespace, parser.comment], 1, lTokens):
            continue
        if utils.are_next_consecutive_token_types([parser.comment], 1, lTokens):
            continue
        for oToken in lTokens:
            if isinstance(oToken, self.oSameLineToken):
                break
        else:
            self.add_violation(violation.New(oToi.get_line_number(), oToi, self.solution))
def analyze(self, oFile):
    lToi = oFile.get_blank_lines_below_line_ending_with_token(self.lTokens)
    for oToi in lToi:
        lTokens = oToi.get_tokens()
        iCount = 0
        iLine = oToi.get_line_number()
        for oToken in lTokens:
            if isinstance(oToken, parser.blank_line):
                iCount += 1
            iLine = utils.increment_line_number(iLine, oToken)
        # An override excuses one of the counted blank lines.
        bOverride = check_if_override_exists(oFile, iLine, self.lOverrides)
        if bOverride:
            iCount -= 1
        if iCount > self.iAllow:
            oViolation = violation.New(oToi.get_line_number(), oToi, self.solution)
            dAction = {}
            dAction['remove'] = self.iAllow - iCount
            oViolation.set_action(dAction)
            self.add_violation(oViolation)
def analyze(self, oFile):
    lToi = oFile.get_n_tokens_before_and_after_tokens(1, self.lTokens)
    for oToi in lToi:
        iLine, lTokens = utils.get_toi_parameters(oToi)
        dAction = {}
        # Check the whitespace to the left of the token.
        oLeft = lTokens[0]
        if not isinstance(oLeft, parser.carriage_return):
            if isinstance(oLeft, parser.whitespace):
                if self.bNIsMinimum:
                    if self.iSpaces > len(oLeft.get_value()):
                        dAction['left'] = {'action': 'adjust'}
                elif self.iSpaces != len(oLeft.get_value()):
                    dAction['left'] = {'action': 'adjust'}
            else:
                dAction['left'] = {'action': 'insert'}
        # Check the whitespace to the right of the token.
        oRight = lTokens[-1]
        if not isinstance(oRight, parser.carriage_return):
            if isinstance(oRight, parser.whitespace):
                if self.bNIsMinimum:
                    if self.iSpaces > len(oRight.get_value()):
                        dAction['right'] = {'action': 'adjust'}
                elif self.iSpaces != len(oRight.get_value()):
                    dAction['right'] = {'action': 'adjust'}
            else:
                dAction['right'] = {'action': 'insert'}
        if len(dAction) > 0:
            sSolution = create_solution_text(dAction, self.iSpaces, lTokens)
            oViolation = violation.New(iLine, oToi, sSolution)
            oViolation.set_action(dAction)
            self.add_violation(oViolation)
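# For illustration: with self.iSpaces = 1 and self.bNIsMinimum = False
# (hypothetical settings), a token preceded by three spaces and followed
# directly by a carriage return produces
#
#     dAction == {'left': {'action': 'adjust'}}
#
# 'insert' is used instead when no whitespace token exists on that side.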
def analyze(self, oFile):
    lToi = oFile.get_tokens_bounded_by(token.process_keyword, token.begin_keyword)
    for oToi in lToi:
        lTokens = oToi.get_tokens()
        if not are_there_process_declarative_items(lTokens):
            continue
        iLine, iSearch, oLeftToken = find_beginning_of_process_declarative_region(oToi.get_line_number(), lTokens)
        if does_a_blank_line_exist(iSearch, lTokens):
            continue
        dAction = {}
        dAction['insert'] = find_carriage_return(iSearch, lTokens) + 1
        oViolation = violation.New(iLine, oToi, self.solution)
        oViolation.set_action(dAction)
        self.add_violation(oViolation)
def analyze(self, oFile):
    if self.lHierarchyLimits is None:
        lToi = oFile.get_line_below_line_ending_with_token(self.lTokens)
    else:
        lToi = oFile.get_line_below_line_ending_with_token_with_hierarchy(self.lTokens, self.lHierarchyLimits)
    for oToi in lToi:
        lTokens = oToi.get_tokens()
        if len(lTokens) == 1:
            if isinstance(lTokens[0], parser.blank_line):
                continue
            if isinstance(lTokens[0], parser.comment):
                continue
        elif len(lTokens) == 2:
            if isinstance(lTokens[0], parser.whitespace) and isinstance(lTokens[1], parser.comment):
                continue
        # Report the violation on the line above the offending line.
        oViolation = violation.New(oToi.get_line_number() - 1, oToi, self.solution)
        self.add_violation(oViolation)
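# For illustration, token shapes of the line below that pass the checks above:
#     [blank_line]             -> compliant
#     [comment]                -> compliant
#     [whitespace, comment]    -> compliant
# Any other shape is reported one line up (oToi.get_line_number() - 1).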
def _analyze(self, lToi):
    for oToi in lToi:
        lTokens = oToi.get_tokens()
        for iToken, oToken in enumerate(lTokens):
            if isinstance(oToken, self.anchor_token):
                iStartIndex = iToken
            if isinstance(oToken, self.token_to_move):
                iMoveIndex = iToken
        # Passing layout is anchor, single whitespace, token to move.
        if not (iStartIndex + 2 == iMoveIndex and isinstance(lTokens[iStartIndex + 1], parser.whitespace)):
            oViolation = violation.New(oToi.get_line_number(), oToi, self.solution)
            dAction = {}
            dAction['insertIndex'] = iStartIndex + 1
            dAction['moveIndex'] = iMoveIndex
            oViolation.set_action(dAction)
            oViolation.set_remap()
            oViolation.fix_blank_lines = True
            self.add_violation(oViolation)
def analyze(self, oFile):
    lToi = oFile.get_tokens_bounded_by(token.process_statement.open_parenthesis,
                                       token.process_statement.close_parenthesis)
    for oToi in lToi:
        lTokens = oToi.get_tokens()
        lTokens = utils.remove_whitespace_from_token_list(lTokens)[1:-1]
        # Reduce the sensitivity list to placeholders and carriage returns.
        lNewList = []
        iSignals = 0
        oSignal = None
        for oToken in lTokens:
            if isinstance(oToken, token.sensitivity_list.comma):
                lNewList.append(parser.todo('signal name'))
                iSignals += 1
                oSignal = None
            elif isinstance(oToken, parser.carriage_return):
                if oSignal is not None:
                    lNewList.append(parser.todo('signal name'))
                    iSignals += 1
                lNewList.append(oToken)
            else:
                oSignal = parser.todo('signal name')
        # Account for a trailing signal not followed by a comma or return.
        if oSignal is not None and not isinstance(oToken, parser.carriage_return):
            lNewList.append(parser.todo('signal name'))
            iSignals += 1
        if iSignals > 1:
            for iToken, oToken in enumerate(lNewList):
                if iToken == 0:
                    continue
                if isinstance(lNewList[iToken - 1], parser.todo) and isinstance(oToken, parser.todo):
                    break
            else:
                oViolation = violation.New(oToi.get_line_number(), oToi, self.solution)
                self.add_violation(oViolation)
def analyze(self, oFile):
    lToi = oFile.get_tokens_bounded_by(self.lAnchorTokens[0], self.oEndToken)
    for oToi in lToi:
        iLine, lTokens = utils.get_toi_parameters(oToi)
        if utils.does_token_type_exist_in_list_of_tokens(type(self.oInsertToken), lTokens):
            continue
        # Remember the last anchor token found; the violation is raised there.
        for iToken, oToken in enumerate(lTokens):
            iLine = utils.increment_line_number(iLine, oToken)
            for oSearch in self.lAnchorTokens:
                if isinstance(oToken, oSearch):
                    iIndex = iToken
                    iLineNumber = iLine
                    sToken = oToken.get_value()
                    sSolution = 'Add *is* keyword to the right of ' + sToken
        oViolation = violation.New(iLineNumber, oToi.extract_tokens(iIndex, iIndex), sSolution)
        self.add_violation(oViolation)
def analyze(self, oFile):
    self._print_debug_message('Analyzing rule: ' + self.unique_id)
    lTargetTypes = oFile.get_tokens_matching_in_range_bounded_by_tokens(self.lTokens, self.oStart, self.oEnd)
    lTargetValues = []
    lTargetValuesLower = []
    for oTargetType in lTargetTypes:
        oToken = oTargetType.get_tokens()[0]
        lTargetValues.append(oToken.get_value())
        lTargetValuesLower.append(oToken.get_value().lower())
    lToi = oFile.get_tokens_bounded_by(self.oRegionStart, self.oRegionEnd)
    for oToi in lToi:
        iLine, lTokens = utils.get_toi_parameters(oToi)
        for iToken, oToken in enumerate(lTokens):
            iLine = utils.increment_line_number(iLine, oToken)
            if is_token_in_ignore_token_list(oToken, self.lIgnoreTokens):
                continue
            sTokenValue = oToken.get_value()
            sTokenValueLower = sTokenValue.lower()
            for sTargetValue, sTargetValueLower in zip(lTargetValues, lTargetValuesLower):
                if sTokenValueLower == sTargetValueLower and sTokenValue != sTargetValue:
                    sSolution = 'Change "' + sTokenValue + '" to "' + sTargetValue + '"'
                    oNewToi = oToi.extract_tokens(iToken, iToken)
                    oViolation = violation.New(iLine, oNewToi, sSolution)
                    dAction = {}
                    dAction['constant'] = sTargetValue
                    dAction['found'] = sTokenValue
                    oViolation.set_action(dAction)
                    self.add_violation(oViolation)
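# For illustration: if a declaration spells a name "G_WIDTH" (hypothetical)
# and a later reference spells it "g_width", the rule above emits the solution
# 'Change "g_width" to "G_WIDTH"' with the action
# {'constant': 'G_WIDTH', 'found': 'g_width'}.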
def _analyze_no_blank_line(self, lToi):
    for oToi in lToi:
        lTokens = oToi.get_tokens()
        if not are_there_process_declarative_items(lTokens):
            continue
        iLine, iSearch = find_beginning_of_process_declarative_region(oToi.get_line_number(), lTokens)
        if not does_a_blank_line_exist(iSearch, lTokens):
            continue
        dAction = {}
        dAction['action'] = 'Remove'
        dAction['start'] = iSearch
        for iToken, oToken in enumerate(lTokens[iSearch:]):
            if isinstance(oToken, parser.carriage_return):
                if not isinstance(lTokens[iSearch + iToken + 1], parser.blank_line):
                    dAction['end'] = iSearch + iToken - 1
                    break
        oViolation = violation.New(iLine, oToi, self.solution)
        oViolation.set_action(dAction)
        self.add_violation(oViolation)
def _analyze_no_blank_line(self, lToi):
    sSolution = 'Remove blank line(s) above *begin* keyword'
    for oToi in lToi:
        lTokens = oToi.get_tokens()
        iLine = oToi.get_line_number() + utils.count_carriage_returns(lTokens)
        # Scan backwards from the *begin* keyword.
        lTokens.reverse()
        if utils.are_next_consecutive_token_types_ignoring_whitespace([token.begin_keyword, token.is_keyword], 0, lTokens):
            continue
        if utils.are_next_consecutive_token_types_ignoring_whitespace([token.begin_keyword, token.close_parenthesis], 0, lTokens):
            continue
        if utils.are_next_consecutive_token_types_ignoring_whitespace([token.begin_keyword, token.process_keyword], 0, lTokens):
            continue
        if not utils.are_next_consecutive_token_types([token.begin_keyword, parser.whitespace, parser.carriage_return, parser.blank_line], 0, lTokens) and \
                not utils.are_next_consecutive_token_types([token.begin_keyword, parser.carriage_return, parser.blank_line], 0, lTokens):
            continue
        dAction = {}
        dAction['action'] = 'Remove'
        # Translate reversed indexes back into forward indexes for the action.
        if isinstance(lTokens[1], parser.whitespace):
            iEnd = len(lTokens) - 2
        else:
            iEnd = len(lTokens) - 3
        for iToken, oToken in enumerate(lTokens):
            if isinstance(oToken, parser.carriage_return):
                if not isinstance(lTokens[iToken + 1], parser.carriage_return):
                    iStart = len(lTokens) - iToken - 2
                    break
        lTokens.reverse()
        dAction['start'] = iStart
        dAction['end'] = iEnd
        oViolation = violation.New(iLine, oToi, sSolution)
        oViolation.set_action(dAction)
        self.add_violation(oViolation)
def analyze(self, oFile):
    lToi = get_tokens_of_interest(oFile, self.lTokenPairs)
    for oToi in lToi:
        lTokens = oToi.get_tokens()
        if utils.find_carriage_return(lTokens) is None and self.allow_single_line:
            # Single-line statements are allowed unless a split token repeats.
            for oSplitToken in self.lSplitTokens:
                if utils.count_token_types_in_list_of_tokens(oSplitToken, lTokens) > 1:
                    break
            else:
                continue
        iLine = oToi.get_line_number()
        for iToken, oToken in enumerate(lTokens):
            iLine = utils.increment_line_number(iLine, oToken)
            for oSplitToken in self.lSplitTokens:
                if isinstance(oToken, oSplitToken):
                    if utils.are_next_consecutive_token_types([parser.whitespace, parser.comment], iToken + 1, lTokens):
                        continue
                    if utils.are_next_consecutive_token_types([parser.comment], iToken + 1, lTokens):
                        continue
                    if utils.are_next_consecutive_token_types([parser.carriage_return], iToken + 1, lTokens):
                        continue
                    oViolation = violation.New(iLine, oToi.extract_tokens(iToken, iToken), self.solution)
                    self.add_violation(oViolation)
                    break
def _analyze(self, lToi):
    for oToi in lToi:
        iLine, lTokens = utils.get_toi_parameters(oToi)
        if not utils.does_token_type_exist_in_list_of_tokens(self.oMoveToken, lTokens):
            continue
        dAction = {}
        bPassing = False
        for iToken, oToken in enumerate(lTokens):
            iLine = utils.increment_line_number(iLine, oToken)
            for oAnchorToken in self.lAnchorTokens:
                if isinstance(oToken, oAnchorToken):
                    dAction['insert'] = iToken + 1
                    sAnchorToken = oToken.get_value()
                    iAnchorLine = iLine
                    if utils.are_next_consecutive_token_types([parser.whitespace, self.oMoveToken], iToken + 1, lTokens):
                        bPassing = True
                        break
                    elif isinstance(lTokens[iToken + 1], self.oMoveToken):
                        bPassing = True
                        break
            if isinstance(oToken, self.oMoveToken):
                iAnchorLine = iLine
                dAction['move_index'] = iToken
                sSolution = 'Move "' + oToken.get_value() + '" on line ' + str(iLine) + \
                            ' to the right of "' + sAnchorToken + '" on line ' + str(iAnchorLine)
            if bPassing:
                break
        else:
            oViolation = violation.New(iAnchorLine, oToi, sSolution)
            oViolation.set_action(dAction)
            oViolation.set_remap()
            self.add_violation(oViolation)
def _analyze(self, lToi):
    for oToi in lToi:
        lTokens = oToi.get_tokens()
        bPassing = False
        iLine = oToi.get_line_number()
        dAction = {}
        for iToken, oToken in enumerate(lTokens):
            iLine = utils.increment_line_number(iLine, oToken)
            for oAnchorToken in self.lAnchorTokens:
                if isinstance(oToken, oAnchorToken):
                    dAction['insert'] = iToken + 1
                    self.solution = 'Move "' + lTokens[-1].get_value() + '" to the right of "' + \
                                    oToken.get_value() + '" on line ' + str(iLine)
                    if isinstance(lTokens[iToken + 1], self.oMoveToken):
                        bPassing = True
                        break
            if bPassing:
                break
        else:
            oViolation = violation.New(iLine, oToi, self.solution)
            oViolation.set_action(dAction)
            oViolation.set_remap()
            oViolation.fix_blank_lines = True
            self.add_violation(oViolation)
def _analyze(self, lToi):
    for oToi in lToi:
        lTokens = oToi.get_tokens()
        iLine = oToi.get_line_number()
        for iToken, oToken in enumerate(lTokens):
            if isinstance(oToken, parser.carriage_return):
                iLine += 1
            if isinstance(oToken, parser.comment):
                if isinstance(lTokens[iToken + 1], parser.carriage_return):
                    if isinstance(lTokens[iToken - 1], parser.carriage_return) or \
                            isinstance(lTokens[iToken - 2], parser.carriage_return):
                        continue
                    if isinstance(lTokens[iToken - 1], parser.whitespace):
                        oNewToi = oToi.extract_tokens(iToken - 1, iToken)
                    else:
                        oNewToi = oToi.extract_tokens(iToken, iToken)
                    oViolation = violation.New(oNewToi.get_line_number(), oNewToi, self.solution)
                    self.add_violation(oViolation)
def analyze(self, oFile):
    lTargetTypes = oFile.get_tokens_matching(self.lTokens)
    lToi = oFile.get_tokens_matching([parser.item])
    for oToi in lToi:
        lTokens = oToi.get_tokens()
        for oToken in lTokens:
            if is_token_in_ignore_token_list(oToken, self.lIgnoreTokens):
                continue
            for oTargetType in lTargetTypes:
                sTokenValue = oToken.get_value()
                sTargetType = oTargetType.get_tokens()[0].get_value()
                if sTokenValue.lower() == sTargetType.lower() and sTokenValue != sTargetType:
                    sSolution = 'Change "' + sTokenValue + '" to "' + sTargetType + '"'
                    oViolation = violation.New(oToi.get_line_number(), oToi, sSolution)
                    dAction = {}
                    dAction['constant'] = sTargetType
                    dAction['found'] = sTokenValue
                    oViolation.set_action(dAction)
                    self.add_violation(oViolation)
def _analyze(self, lToi):
    for oToi in lToi:
        lTokens = oToi.get_tokens()
        lTokens = utils.remove_whitespace_from_token_list(lTokens)[1:-1]
        # Reduce the sensitivity list to placeholders and carriage returns.
        lNewList = []
        iSignals = 0
        oSignal = None
        for oToken in lTokens:
            if isinstance(oToken, token.sensitivity_list.comma):
                lNewList.append(parser.todo('signal name'))
                iSignals += 1
                oSignal = None
            elif isinstance(oToken, parser.carriage_return):
                if oSignal is not None:
                    lNewList.append(parser.todo('signal name'))
                    iSignals += 1
                lNewList.append(oToken)
            else:
                oSignal = parser.todo('signal name')
        # Account for a trailing signal not followed by a comma or return.
        if oSignal is not None and not isinstance(oToken, parser.carriage_return):
            lNewList.append(parser.todo('signal name'))
            iSignals += 1
        if iSignals > 1:
            for iToken, oToken in enumerate(lNewList):
                if iToken == 0:
                    continue
                if isinstance(lNewList[iToken - 1], parser.todo) and isinstance(oToken, parser.todo):
                    break
            else:
                oViolation = violation.New(oToi.get_line_number(), oToi, self.solution)
                self.add_violation(oViolation)
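# For illustration of the reduction above: a two-signal sensitivity list split
# across lines reduces lNewList to [todo, carriage_return, todo]; no two todo
# placeholders are adjacent, the inner for/else completes, and a violation is
# raised. With both signals on one line the reduction yields [todo, todo], the
# loop breaks on the adjacent pair, and no violation is raised.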
def _analyze(self, lToi):
    for oToi in lToi:
        oViolation = violation.New(oToi.get_line_number(), oToi, self.solution)
        self.add_violation(oViolation)
def _analyze(self, lToi):
    for oToi in lToi:
        sSolution = self.action.capitalize() + ' ' + self.solution
        self.add_violation(violation.New(oToi.get_line_number(), oToi, sSolution))
def analyze(self, oFile):
    lToi = oFile.get_tokens_bounded_by(self.left_token, self.right_token)

    def flush_group(dAnalysis):
        # Compute the column adjustment for each line in the current group,
        # emit a violation for every non-zero adjustment, then start a new group.
        add_adjustments_to_dAnalysis(dAnalysis, self.compact_alignment)
        for iKey in list(dAnalysis.keys()):
            if dAnalysis[iKey]['adjust'] != 0:
                oLineTokens = oFile.get_tokens_from_line(iKey)
                sSolution = 'Move ' + dAnalysis[iKey]['token_value'] + ' ' + str(dAnalysis[iKey]['adjust']) + ' columns'
                oViolation = violation.New(oLineTokens.get_line_number(), oLineTokens, sSolution)
                oViolation.set_action(dAnalysis[iKey])
                self.add_violation(oViolation)
        return {}

    for oToi in lToi:
        lTokens = oToi.get_tokens()
        iLine = oToi.get_line_number()
        iColumn = 0
        bTokenFound = False
        iToken = -1
        bSkip = False
        oEndSkipToken = None
        dAnalysis = {}
        for iIndex in range(0, len(lTokens)):
            iToken += 1
            oToken = lTokens[iIndex]
            bSkip, oEndSkipToken = check_for_exclusions(oToken, bSkip, oEndSkipToken, self.lUnless)
            if not bTokenFound and not bSkip:
                for oSearch in self.lTokens:
                    if isinstance(oToken, oSearch):
                        bTokenFound = True
                        dAnalysis[iLine] = {}
                        dAnalysis[iLine]['token_column'] = iColumn
                        dAnalysis[iLine]['token_index'] = iToken
                        dAnalysis[iLine]['line_number'] = iLine
                        dAnalysis[iLine]['token_value'] = oToken.get_value()
                        if isinstance(lTokens[iIndex - 1], parser.whitespace):
                            dAnalysis[iLine]['left_column'] = iColumn - len(lTokens[iIndex - 1].get_value())
                        else:
                            dAnalysis[iLine]['left_column'] = iColumn
                        break
            iColumn += len(oToken.get_value())
            if isinstance(oToken, parser.carriage_return):
                iLine += 1
                iColumn = 0
                bTokenFound = False
                iToken = -1
                if self.comment_line_ends_group:
                    if utils.are_next_consecutive_token_types([parser.whitespace, parser.comment], iIndex + 1, lTokens) or \
                            utils.are_next_consecutive_token_types([parser.comment], iIndex + 1, lTokens):
                        dAnalysis = flush_group(dAnalysis)
                if self.blank_line_ends_group:
                    if utils.are_next_consecutive_token_types([parser.blank_line], iIndex + 1, lTokens):
                        dAnalysis = flush_group(dAnalysis)
                if self.if_control_statements_ends_group:
                    if check_for_if_keywords(iIndex + 1, lTokens):
                        dAnalysis = flush_group(dAnalysis)
                if self.case_control_statements_ends_group:
                    if check_for_case_keywords(iIndex + 1, lTokens):
                        dAnalysis = flush_group(dAnalysis)
        dAnalysis = flush_group(dAnalysis)
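# Hedged sketch, for illustration only: add_adjustments_to_dAnalysis is
# defined elsewhere, and this stand-in only shows the shape of what the
# grouping logic above relies on. The alignment policy here is an assumption,
# not the project's actual algorithm.
def add_adjustments_to_dAnalysis_sketch(dAnalysis, bCompactAlignment):
    if len(dAnalysis) == 0:
        return
    if bCompactAlignment:
        # Pull every token to one space right of the widest left-hand side.
        iTargetColumn = max(d['left_column'] for d in dAnalysis.values()) + 1
    else:
        # Align every token to the right-most existing token column.
        iTargetColumn = max(d['token_column'] for d in dAnalysis.values())
    for dEntry in dAnalysis.values():
        dEntry['adjust'] = iTargetColumn - dEntry['token_column']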
def analyze(self, oFile):
    lIncludeLines = []
    if not self.blank_line_ends_group:
        lIncludeLines.append(parser.blank_line)
    if not self.comment_line_ends_group:
        lIncludeLines.append(parser.comment)
    dAnalysis = {}

    def flush_group(dAnalysis):
        # Emit a violation for every line in the group needing adjustment,
        # then start a new group.
        add_adjustments_to_dAnalysis(dAnalysis, self.compact_alignment)
        for iKey in list(dAnalysis.keys()):
            if dAnalysis[iKey]['adjust'] != 0:
                oLineTokens = oFile.get_tokens_from_line(iKey)
                oViolation = violation.New(oLineTokens.get_line_number(), oLineTokens, self.solution)
                oViolation.set_action(dAnalysis[iKey])
                self.violations.append(oViolation)
        return {}

    lToi = oFile.get_tokens_bounded_by(self.left_token, self.right_token)
    for oToi in lToi:
        lTokens = oToi.get_tokens()
        iLine = oToi.get_line_number()
        iColumn = 0
        bTokenFound = False
        iToken = -1
        for iIndex in range(0, len(lTokens)):
            iToken += 1
            oToken = lTokens[iIndex]
            if not bTokenFound:
                for oSearch in self.lTokens:
                    if isinstance(oToken, oSearch):
                        bTokenFound = True
                        dAnalysis[iLine] = {}
                        dAnalysis[iLine]['token_column'] = iColumn
                        dAnalysis[iLine]['token_index'] = iToken
                        dAnalysis[iLine]['line_number'] = iLine
                        if isinstance(lTokens[iIndex - 1], parser.whitespace):
                            dAnalysis[iLine]['left_column'] = iColumn - len(lTokens[iIndex - 1].get_value())
                        else:
                            dAnalysis[iLine]['left_column'] = iColumn
                        break
            iColumn += len(oToken.get_value())
            if isinstance(oToken, token.generic_clause.semicolon) and self.separate_generic_port_alignment:
                dAnalysis = flush_group(dAnalysis)
            if isinstance(oToken, token.generic_map_aspect.close_parenthesis) and self.separate_generic_port_alignment:
                dAnalysis = flush_group(dAnalysis)
            if isinstance(oToken, parser.carriage_return):
                iLine += 1
                iColumn = 0
                bTokenFound = False
                iToken = -1
                if self.comment_line_ends_group:
                    if utils.are_next_consecutive_token_types([parser.whitespace, parser.comment], iIndex + 1, lTokens) or \
                            utils.are_next_consecutive_token_types([parser.comment], iIndex + 1, lTokens):
                        dAnalysis = flush_group(dAnalysis)
                if self.blank_line_ends_group:
                    if utils.are_next_consecutive_token_types([parser.blank_line], iIndex + 1, lTokens):
                        dAnalysis = flush_group(dAnalysis)
    dAnalysis = flush_group(dAnalysis)