def _analyze_require_blank_line(self, lToi):
    sSolution = 'Add blank line above *begin* keyword'
    dAction = {}
    dAction['action'] = 'Insert'
    for oToi in lToi:
        lTokens = oToi.get_tokens()
        iLine = oToi.get_line_number() + utils.count_carriage_returns(lTokens)
        for iToken, oToken in enumerate(lTokens):
            if utils.are_next_consecutive_token_types_ignoring_whitespace([token.process_keyword, token.begin_keyword], iToken, lTokens):
                if not blank_lines_exist(iToken, lTokens):
                    oViolation = violation.New(iLine, oToi, sSolution)
                    oViolation.set_action(dAction)
                    self.add_violation(oViolation)
                break
            if utils.are_next_consecutive_token_types_ignoring_whitespace([token.close_parenthesis, token.begin_keyword], iToken, lTokens):
                if not blank_lines_exist(iToken, lTokens):
                    oViolation = violation.New(iLine, oToi, sSolution)
                    oViolation.set_action(dAction)
                    self.add_violation(oViolation)
                break
            if utils.are_next_consecutive_token_types_ignoring_whitespace([token.is_keyword, token.begin_keyword], iToken, lTokens):
                if not blank_lines_exist(iToken, lTokens):
                    oViolation = violation.New(iLine, oToi, sSolution)
                    oViolation.set_action(dAction)
                    self.add_violation(oViolation)
                break

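# blank_lines_exist is called above and further down but is not defined in this
# snippet.  A minimal sketch of the assumed behaviour: scan forward from iToken
# and report whether a parser.blank_line token occurs before the *begin*
# keyword.  This is an illustration, not the project's actual helper.
def blank_lines_exist(iToken, lTokens):
    for oToken in lTokens[iToken:]:
        if isinstance(oToken, token.begin_keyword):
            return False
        if isinstance(oToken, parser.blank_line):
            return True
    return False
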
def _analyze_require_blank_line(self, lToi):
    sSolution = 'Insert blank line above *begin* keyword'
    for oToi in lToi:
        lTokens = oToi.get_tokens()
        iLine = oToi.get_line_number() + utils.count_carriage_returns(lTokens)
        lTokens.reverse()
        if utils.are_next_consecutive_token_types_ignoring_whitespace([token.begin_keyword, token.is_keyword], 0, lTokens):
            continue
        if utils.are_next_consecutive_token_types_ignoring_whitespace([token.begin_keyword, token.close_parenthesis], 0, lTokens):
            continue
        if utils.are_next_consecutive_token_types_ignoring_whitespace([token.begin_keyword, token.process_keyword], 0, lTokens):
            continue
        if utils.are_next_consecutive_token_types([token.begin_keyword, parser.whitespace, parser.carriage_return, parser.blank_line], 0, lTokens):
            continue
        if utils.are_next_consecutive_token_types([token.begin_keyword, parser.carriage_return, parser.blank_line], 0, lTokens):
            continue
        dAction = {}
        dAction['action'] = 'Insert'
        if isinstance(lTokens[1], parser.whitespace):
            dAction['index'] = len(lTokens) - 2
        else:
            dAction['index'] = len(lTokens) - 1
        lTokens.reverse()
        oViolation = violation.New(iLine, oToi, sSolution)
        oViolation.set_action(dAction)
        self.add_violation(oViolation)

def analyze(self, oFile):
    lToi = oFile.get_tokens_bounded_by(token.process_keyword, token.begin_keyword)
    for oToi in lToi:
        lTokens = oToi.get_tokens()
        iLine = oToi.get_line_number() + utils.count_carriage_returns(lTokens)
        lTokens.reverse()
        if utils.are_next_consecutive_token_types_ignoring_whitespace([token.begin_keyword, token.is_keyword], 0, lTokens):
            continue
        if utils.are_next_consecutive_token_types_ignoring_whitespace([token.begin_keyword, token.close_parenthesis], 0, lTokens):
            continue
        if utils.are_next_consecutive_token_types_ignoring_whitespace([token.begin_keyword, token.process_keyword], 0, lTokens):
            continue
        if utils.are_next_consecutive_token_types([token.begin_keyword, parser.whitespace, parser.carriage_return, parser.blank_line], 0, lTokens):
            continue
        if utils.are_next_consecutive_token_types([token.begin_keyword, parser.carriage_return, parser.blank_line], 0, lTokens):
            continue
        dAction = {}
        if isinstance(lTokens[1], parser.whitespace):
            dAction['insert'] = len(lTokens) - 2
        else:
            dAction['insert'] = len(lTokens) - 1
        lTokens.reverse()
        oViolation = violation.New(iLine, oToi, self.solution)
        oViolation.set_action(dAction)
        self.add_violation(oViolation)

def analyze(self, oFile):
    lToi = oFile.get_tokens_bounded_by(token.process_keyword, token.begin_keyword)
    for oToi in lToi:
        lTokens = oToi.get_tokens()
        iLine = oToi.get_line_number() + utils.count_carriage_returns(lTokens)
        for iToken, oToken in enumerate(lTokens):
            if utils.are_next_consecutive_token_types_ignoring_whitespace([token.process_keyword, token.begin_keyword], iToken, lTokens):
                if blank_lines_exist(iToken, lTokens):
                    oViolation = violation.New(iLine, oToi, self.solution)
                    self.add_violation(oViolation)
                break
            if utils.are_next_consecutive_token_types_ignoring_whitespace([token.close_parenthesis, token.begin_keyword], iToken, lTokens):
                if blank_lines_exist(iToken, lTokens):
                    oViolation = violation.New(iLine, oToi, self.solution)
                    self.add_violation(oViolation)
                break
            if utils.are_next_consecutive_token_types_ignoring_whitespace([token.is_keyword, token.begin_keyword], iToken, lTokens):
                if blank_lines_exist(iToken, lTokens):
                    oViolation = violation.New(iLine, oToi, self.solution)
                    self.add_violation(oViolation)
                break

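# utils.count_carriage_returns is used above to turn the starting line of the
# process..begin token span into the line of the *begin* keyword.  A minimal
# sketch of the assumed behaviour (the real utility lives in the project's
# utils module and may differ):
def count_carriage_returns(lTokens):
    iCount = 0
    for oToken in lTokens:
        if isinstance(oToken, parser.carriage_return):
            iCount += 1
    return iCount
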
def are_there_process_declarative_items(lTokens):
    for iToken, oToken in enumerate(lTokens):
        if utils.are_next_consecutive_token_types_ignoring_whitespace([token.process_keyword, token.begin_keyword], iToken, lTokens):
            return False
        if utils.are_next_consecutive_token_types_ignoring_whitespace([token.close_parenthesis, token.begin_keyword], iToken, lTokens):
            return False
        if utils.are_next_consecutive_token_types_ignoring_whitespace([token.is_keyword, token.begin_keyword], iToken, lTokens):
            return False
    return True

def analyze(self, oFile):
    # NOTE: lElsifBoundingTokens, oStart and oEnd are assumed to be defined
    # elsewhere in the rule (e.g. at module or class level); they are not part
    # of this snippet.
    lToi = oFile.get_tokens_bounded_by_token_when_between_tokens(lElsifBoundingTokens[0], lElsifBoundingTokens[1], oStart, oEnd)
    for oToi in lToi:
        lTokens = oToi.get_tokens()
        bEventFound = False
        iStartIndex = 0
        for iToken, oToken in enumerate(lTokens):
            if self.clock == 'edge':
                if utils.are_next_consecutive_token_types_ignoring_whitespace([parser.tic, parser.event_keyword, token.logical_operator.and_operator, None, token.relational_operator.equal], iToken + 1, lTokens):
                    bEventFound = True
                    iStartIndex = iToken
                    sSolution = 'Change event to rising_edge/falling_edge.'
                    dAction = {}
                    dAction['convert_to'] = 'edge'
                    dAction['clock'] = oToken.get_value()
                if bEventFound and isinstance(oToken, parser.character_literal):
                    if oToken.get_value() == "'1'":
                        dAction['edge'] = 'rising_edge'
                    else:
                        dAction['edge'] = 'falling_edge'
                    oMyToi = oToi.extract_tokens(iStartIndex, iToken)
                    oViolation = violation.New(oToi.get_line_number(), oMyToi, sSolution)
                    oViolation.set_action(dAction)
                    self.add_violation(oViolation)
                    bEventFound = False
            elif self.clock == 'event':
                if isinstance(oToken, token.ieee.std_logic_1164.function.rising_edge):
                    sSolution = 'Change rising_edge to event format.'
                    iStartIndex = iToken
                    dAction = {}
                    dAction['convert_to'] = 'event'
                    dAction['edge'] = "'1'"
                    bEventFound = True
                elif isinstance(oToken, token.ieee.std_logic_1164.function.falling_edge):
                    sSolution = 'Change falling_edge to event format.'
                    iStartIndex = iToken
                    dAction = {}
                    dAction['convert_to'] = 'event'
                    dAction['edge'] = "'0'"
                    bEventFound = True
                if bEventFound and isinstance(oToken, parser.todo):
                    dAction['clock'] = oToken.get_value()
                if bEventFound and isinstance(oToken, parser.close_parenthesis):
                    oMyToi = oToi.extract_tokens(iStartIndex, iToken)
                    oViolation = violation.New(oToi.get_line_number(), oMyToi, sSolution)
                    oViolation.set_action(dAction)
                    self.add_violation(oViolation)
                    bEventFound = False
            else:
                sys.stderr.write('Invalid configuration option ' + self.clock)
                exit(1)

def _analyze(self, lToi):
    for oToi in lToi:
        iLine, lTokens = utils.get_toi_parameters(oToi)
        lLabels = []
        for iToken, oToken in enumerate(lTokens):
            iLine = utils.increment_line_number(iLine, oToken)
            if manage_labels(oToken, lLabels):
                continue
            if isinstance(oToken, token.for_generate_statement.end_generate_keyword):
                if not utils.are_next_consecutive_token_types_ignoring_whitespace([token.for_generate_statement.end_generate_label], iToken + 1, lTokens):
                    oNewToi = oToi.extract_tokens(iToken, iToken)
                    dAction = {}
                    dAction['label'] = token.for_generate_statement.end_generate_label(lLabels[-1].get_value())
                    sSolution = 'Add label ' + lLabels[-1].get_value()
                    oViolation = violation.New(oNewToi.get_line_number(), oNewToi, sSolution)
                    oViolation.set_action(dAction)
                    self.add_violation(oViolation)
                continue
            if isinstance(oToken, token.if_generate_statement.end_generate_keyword):
                if not utils.are_next_consecutive_token_types_ignoring_whitespace([token.if_generate_statement.end_generate_label], iToken + 1, lTokens):
                    oNewToi = oToi.extract_tokens(iToken, iToken)
                    dAction = {}
                    dAction['label'] = token.if_generate_statement.end_generate_label(lLabels[-1].get_value())
                    sSolution = 'Add label ' + lLabels[-1].get_value()
                    oViolation = violation.New(oNewToi.get_line_number(), oNewToi, sSolution)
                    oViolation.set_action(dAction)
                    self.add_violation(oViolation)
                continue
            if isinstance(oToken, token.case_generate_statement.end_generate_keyword):
                if not utils.are_next_consecutive_token_types_ignoring_whitespace([token.case_generate_statement.end_generate_label], iToken + 1, lTokens):
                    oNewToi = oToi.extract_tokens(iToken, iToken)
                    dAction = {}
                    dAction['label'] = token.case_generate_statement.end_generate_label(lLabels[-1].get_value())
                    sSolution = 'Add label ' + lLabels[-1].get_value()
                    oViolation = violation.New(oNewToi.get_line_number(), oNewToi, sSolution)
                    oViolation.set_action(dAction)
                    self.add_violation(oViolation)
                continue

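# manage_labels is called above but not defined in this snippet.  A minimal
# sketch of the assumed behaviour: push a generate statement's label onto
# lLabels when the statement opens and pop it when the matching end label is
# reached, so lLabels[-1] always names the innermost open generate statement.
# The opening-label token classes (generate_label) are assumptions mirroring
# the end_generate_label classes used above; the real helper may differ.
def manage_labels(oToken, lLabels):
    lOpenLabels = (
        token.for_generate_statement.generate_label,
        token.if_generate_statement.generate_label,
        token.case_generate_statement.generate_label,
    )
    lCloseLabels = (
        token.for_generate_statement.end_generate_label,
        token.if_generate_statement.end_generate_label,
        token.case_generate_statement.end_generate_label,
    )
    if isinstance(oToken, lOpenLabels):
        lLabels.append(oToken)
        return True
    if isinstance(oToken, lCloseLabels):
        lLabels.pop()
        return True
    return False
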
def _is_open_paren_after_assignment(oToi):
    iLine, lTokens = utils.get_toi_parameters(oToi)
    for iToken, oToken in enumerate(lTokens):
        if isinstance(oToken, token.constant_declaration.assignment_operator):
            if utils.are_next_consecutive_token_types_ignoring_whitespace([token.constant_declaration.assignment_operator, parser.open_parenthesis], iToken, lTokens):
                return True
    return False

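# utils.get_toi_parameters is assumed to unpack the two values the loops above
# need from a token-of-interest object: the starting line number and the token
# list.  A minimal sketch of that assumption:
def get_toi_parameters(oToi):
    iLine = oToi.get_line_number()
    lTokens = oToi.get_tokens()
    return iLine, lTokens
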
def _analyze_no_blank_line(self, lToi):
    sSolution = 'Remove blank line(s) above *begin* keyword'
    for oToi in lToi:
        lTokens = oToi.get_tokens()
        iLine = oToi.get_line_number() + utils.count_carriage_returns(lTokens)
        lTokens.reverse()
        if utils.are_next_consecutive_token_types_ignoring_whitespace([token.begin_keyword, token.is_keyword], 0, lTokens):
            continue
        if utils.are_next_consecutive_token_types_ignoring_whitespace([token.begin_keyword, token.close_parenthesis], 0, lTokens):
            continue
        if utils.are_next_consecutive_token_types_ignoring_whitespace([token.begin_keyword, token.process_keyword], 0, lTokens):
            continue
        if not utils.are_next_consecutive_token_types([token.begin_keyword, parser.whitespace, parser.carriage_return, parser.blank_line], 0, lTokens) and \
           not utils.are_next_consecutive_token_types([token.begin_keyword, parser.carriage_return, parser.blank_line], 0, lTokens):
            continue
        dAction = {}
        dAction['action'] = 'Remove'
        if isinstance(lTokens[1], parser.whitespace):
            iEnd = len(lTokens) - 2
        else:
            iEnd = len(lTokens) - 3
        for iToken, oToken in enumerate(lTokens):
            if isinstance(oToken, parser.carriage_return):
                if not isinstance(lTokens[iToken + 1], parser.carriage_return):
                    iStart = len(lTokens) - iToken - 2
                    break
        lTokens.reverse()
        dAction['start'] = iStart
        dAction['end'] = iEnd
        oViolation = violation.New(iLine, oToi, sSolution)
        oViolation.set_action(dAction)
        self.add_violation(oViolation)

def detect_clock_definition(iToken, oToken, lTokens):
    if isinstance(oToken, token.if_statement.if_keyword) or isinstance(oToken, token.if_statement.elsif_keyword):
        if oToken.get_hierarchy() != 0:
            return False
        if utils.are_next_consecutive_token_types_ignoring_whitespace([parser.open_parenthesis, token.ieee.std_logic_1164.function.rising_edge], iToken + 1, lTokens) or \
           utils.are_next_consecutive_token_types_ignoring_whitespace([token.ieee.std_logic_1164.function.rising_edge], iToken + 1, lTokens) or \
           utils.are_next_consecutive_token_types_ignoring_whitespace([parser.open_parenthesis, token.ieee.std_logic_1164.function.falling_edge], iToken + 1, lTokens) or \
           utils.are_next_consecutive_token_types_ignoring_whitespace([token.ieee.std_logic_1164.function.falling_edge], iToken + 1, lTokens) or \
           utils.are_next_consecutive_token_types_ignoring_whitespace([None, parser.tic, parser.event_keyword, token.logical_operator.and_operator, None, token.relational_operator.equal], iToken + 1, lTokens) or \
           utils.are_next_consecutive_token_types_ignoring_whitespace([parser.open_parenthesis, None, parser.tic, parser.event_keyword, token.logical_operator.and_operator, None, token.relational_operator.equal], iToken + 1, lTokens):
            return True
    return False

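# Nearly every check in this section leans on
# utils.are_next_consecutive_token_types_ignoring_whitespace.  A minimal sketch
# of the assumed semantics: starting at iToken, walk the token list, skip
# whitespace-like tokens, and require the remaining tokens to match the
# expected types in order; a None entry is treated as a wildcard matching any
# token (as in the clock'event patterns above).  The project's real utility may
# differ in detail.
def are_next_consecutive_token_types_ignoring_whitespace(lTypes, iToken, lTokens):
    iIndex = iToken
    for oType in lTypes:
        # skip whitespace, carriage returns and blank lines
        while iIndex < len(lTokens) and isinstance(lTokens[iIndex], (parser.whitespace, parser.carriage_return, parser.blank_line)):
            iIndex += 1
        if iIndex >= len(lTokens):
            return False
        if oType is not None and not isinstance(lTokens[iIndex], oType):
            return False
        iIndex += 1
    return True
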
def analyze(self, oFile):
    lToi = []
    for lTokenPair in self.lTokenPairs:
        aToi = oFile.get_tokens_bounded_by(lTokenPair[0], lTokenPair[1], bExcludeLastToken=self.bExcludeLastToken)
        lToi = utils.combine_two_token_class_lists(lToi, aToi)
    for oToi in lToi:
        iLine, lTokens = utils.get_toi_parameters(oToi)
        if utils.are_next_consecutive_token_types_ignoring_whitespace([parser.open_parenthesis], 1, lTokens):
            continue
        iStartColumn = calculate_start_column(oFile, oToi)
        lColumn = []
        lColumn.append(iStartColumn)
        bCheckAlignment = False
        iFirstColumn = oFile.get_column_of_token_index(oToi.get_start_index())
        iColumn = iFirstColumn
        iPreviousColumn = 0
        iIndent = 0
        for iToken, oToken in enumerate(lTokens):
            iLine = utils.increment_line_number(iLine, oToken)
            if isinstance(oToken, parser.carriage_return):
                bCheckAlignment = True
                iPreviousColumn = lColumn[-1]
                iColumn = 0
                if isinstance(lTokens[iToken + 1], parser.whitespace):
                    iIndent = len(lTokens[iToken + 1].get_value())
                else:
                    iIndent = 0
                continue
            if isinstance(oToken, parser.blank_line):
                bCheckAlignment = False
                continue
            iColumn += len(oToken.get_value())
            if isinstance(oToken, parser.open_parenthesis):
                lColumn.append(iColumn + iPreviousColumn - iIndent)
            if isinstance(oToken, parser.close_parenthesis):
                lColumn.pop()
            if bCheckAlignment:
                if isinstance(oToken, parser.whitespace):
                    if len(oToken.get_value()) != lColumn[-1]:
                        dAction = {}
                        dAction['line'] = iLine
                        dAction['column'] = lColumn[-1]
                        dAction['action'] = 'adjust'
                        dAction['indent'] = iIndent
                        dAction['previous'] = iPreviousColumn
                        oViolation = violation.New(iLine, oToi.extract_tokens(iToken, iToken), self.solution)
                        oViolation.set_action(dAction)
                        self.add_violation(oViolation)
                else:
                    # NOTE: comparing the list lColumn to 0 is always True as
                    # written; the original intent may have been lColumn[-1] or
                    # len(lColumn).  Behaviour preserved here.
                    if lColumn != 0:
                        dAction = {}
                        if isinstance(oToken, parser.open_parenthesis):
                            dAction['column'] = lColumn[-2]
                        else:
                            dAction['column'] = lColumn[-1]
                        dAction['action'] = 'insert'
                        oViolation = violation.New(iLine, oToi.extract_tokens(iToken, iToken), self.solution)
                        oViolation.set_action(dAction)
                        self.add_violation(oViolation)
                bCheckAlignment = False

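# utils.increment_line_number is assumed to bump the running line counter
# whenever the current token is a carriage return; a minimal sketch of that
# assumption:
def increment_line_number(iLine, oToken):
    if isinstance(oToken, parser.carriage_return):
        return iLine + 1
    return iLine
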