def _fix_violation(self, oViolation):
    """Rewrite a clock-edge expression according to the violation's action dict.

    When ``convert_to`` is ``'edge'`` the tokens become a
    ``rising_edge(clock)`` / ``falling_edge(clock)`` call; otherwise they
    become the ``clock'event and clock = '<edge>'`` form.
    """
    dAction = oViolation.get_action()
    sClock = dAction['clock']
    if dAction['convert_to'] == 'edge':
        # Pick the IEEE std_logic_1164 edge function matching the action.
        if dAction['edge'] == 'rising_edge':
            oEdgeFunc = token.ieee.std_logic_1164.function.rising_edge('rising_edge')
        else:
            oEdgeFunc = token.ieee.std_logic_1164.function.falling_edge('falling_edge')
        lReplacement = [
            oEdgeFunc,
            parser.open_parenthesis(),
            parser.todo(sClock),
            parser.close_parenthesis(),
        ]
    else:
        # Classic <clock>'event and <clock> = '<edge>' expression.
        lReplacement = [
            parser.todo(sClock),
            parser.tic("'"),
            parser.event_keyword('event'),
            parser.whitespace(' '),
            token.logical_operator.and_operator('and'),
            parser.whitespace(' '),
            parser.todo(sClock),
            parser.whitespace(' '),
            token.relational_operator.equal('='),
            parser.whitespace(' '),
            parser.character_literal(dAction['edge']),
        ]
    oViolation.set_tokens(lReplacement)
def _fix_violation(self, oFile):
    """Wrap every recorded violation's tokens in parentheses, then push
    all the fixes back into *oFile*."""
    for oEntry in self.violations:
        lEntryTokens = oEntry.get_tokens()
        # In-place insert at the front and extend at the back.
        lEntryTokens[0:0] = [parser.open_parenthesis()]
        lEntryTokens += [parser.close_parenthesis()]
        oEntry.set_tokens(lEntryTokens)
    oFile.update(self.violations)
def post_token_assignments(lTokens):
    """Re-classify generic ``parser.todo`` placeholder tokens in *lTokens*.

    Walks the token list once, tagging each token with the current code-tag
    state and replacing every ``parser.todo`` whose value matches a known
    VHDL operator, direction keyword, IEEE name, or character literal with
    the corresponding specific token type.  The list is mutated in place.
    """
    oCodeTags = code_tags.New()
    for iToken, oToken in enumerate(lTokens):
        oToken.set_code_tags(oCodeTags.get_tags())
        if isinstance(oToken, parser.todo):
            sValue = oToken.get_value()
            # Concatenation operator.
            if sValue == '&':
                lTokens[iToken] = adding_operator.concat()
                continue
            # '+' is a unary sign when it follows '(', a keyword, or an
            # assignment (ignoring whitespace); otherwise it is the binary
            # adding operator.
            if sValue == '+':
                if utils.are_previous_consecutive_token_types_ignoring_whitespace([parser.open_parenthesis], iToken - 1, lTokens):
                    lTokens[iToken] = sign.plus()
                elif utils.are_previous_consecutive_token_types_ignoring_whitespace([parser.keyword], iToken - 1, lTokens):
                    lTokens[iToken] = sign.plus()
                elif utils.are_previous_consecutive_token_types_ignoring_whitespace([parser.assignment], iToken - 1, lTokens):
                    lTokens[iToken] = sign.plus()
                else:
                    lTokens[iToken] = adding_operator.plus()
                continue
            # Same sign-vs-operator disambiguation for '-'.
            if sValue == '-':
                if utils.are_previous_consecutive_token_types_ignoring_whitespace([parser.open_parenthesis], iToken - 1, lTokens):
                    lTokens[iToken] = sign.minus()
                elif utils.are_previous_consecutive_token_types_ignoring_whitespace([parser.keyword], iToken - 1, lTokens):
                    lTokens[iToken] = sign.minus()
                elif utils.are_previous_consecutive_token_types_ignoring_whitespace([parser.assignment], iToken - 1, lTokens):
                    lTokens[iToken] = sign.minus()
                else:
                    lTokens[iToken] = adding_operator.minus()
                continue
            # Punctuation.
            if sValue == '(':
                lTokens[iToken] = parser.open_parenthesis()
                continue
            if sValue == ')':
                lTokens[iToken] = parser.close_parenthesis()
                continue
            if sValue == ',':
                lTokens[iToken] = parser.comma()
                continue
            # Range directions (case-insensitive; original spelling kept).
            if sValue.lower() == 'to':
                lTokens[iToken] = token.direction.to(sValue)
                continue
            if sValue.lower() == 'downto':
                lTokens[iToken] = token.direction.downto(sValue)
                continue
            # Logical operators.
            if sValue.lower() == 'and':
                lTokens[iToken] = token.logical_operator.and_operator(sValue)
                continue
            if sValue.lower() == 'or':
                lTokens[iToken] = token.logical_operator.or_operator(sValue)
                continue
            if sValue.lower() == 'nand':
                lTokens[iToken] = token.logical_operator.nand_operator(sValue)
                continue
            if sValue.lower() == 'nor':
                lTokens[iToken] = token.logical_operator.nor_operator(sValue)
                continue
            if sValue.lower() == 'xor':
                lTokens[iToken] = token.logical_operator.xor_operator(sValue)
                continue
            if sValue.lower() == 'xnor':
                lTokens[iToken] = token.logical_operator.xnor_operator(sValue)
                continue
            # Miscellaneous operators.
            if sValue.lower() == '**':
                lTokens[iToken] = token.miscellaneous_operator.double_star(sValue)
                continue
            if sValue.lower() == 'abs':
                lTokens[iToken] = token.miscellaneous_operator.abs_operator(sValue)
                continue
            if sValue.lower() == 'not':
                lTokens[iToken] = token.miscellaneous_operator.not_operator(sValue)
                continue
            # Multiplying operators.
            if sValue.lower() == '*':
                lTokens[iToken] = token.multiplying_operator.star(sValue)
                continue
            if sValue.lower() == '/':
                lTokens[iToken] = token.multiplying_operator.slash(sValue)
                continue
            if sValue.lower() == 'mod':
                lTokens[iToken] = token.multiplying_operator.mod_operator(sValue)
                continue
            if sValue.lower() == 'rem':
                lTokens[iToken] = token.multiplying_operator.rem_operator(sValue)
                continue
            if sValue == '=':
                lTokens[iToken] = token.relational_operator.equal(sValue)
                continue
            if sValue == "'":
                lTokens[iToken] = parser.tic(sValue)
                continue
            if sValue.lower() == 'event':
                lTokens[iToken] = parser.event_keyword(sValue)
                continue
            ### IEEE values
            if sValue.lower() == 'rising_edge':
                lTokens[iToken] = token.ieee.std_logic_1164.function.rising_edge(sValue)
                continue
            if sValue.lower() == 'falling_edge':
                lTokens[iToken] = token.ieee.std_logic_1164.function.falling_edge(sValue)
                continue
            # NOTE(review): the next three branches have no 'continue';
            # control falls through to the checks below, which cannot match
            # these values, so behavior is unchanged — confirm intentional.
            if sValue.lower() == 'std_logic_vector':
                lTokens[iToken] = token.ieee.std_logic_1164.types.std_logic_vector(sValue)
            if sValue.lower() == 'std_ulogic_vector':
                lTokens[iToken] = token.ieee.std_logic_1164.types.std_ulogic_vector(sValue)
            if sValue.lower() == 'std_ulogic':
                lTokens[iToken] = token.ieee.std_logic_1164.types.std_ulogic(sValue)
            # Character literal such as '0' or '1' (exactly 3 chars, quoted).
            if len(sValue) == 3 and sValue.startswith("'") and sValue.endswith("'"):
                lTokens[iToken] = parser.character_literal(sValue)
                continue
        else:
            # Non-todo token: fold it into the code-tag state and re-tag it
            # with the updated tags.
            oCodeTags.update(oToken)
            oToken.set_code_tags(oCodeTags.get_tags())
            sValue = oToken.get_value()
            # NOTE(review): unlike the todo branch above, this sign/operator
            # disambiguation has no parser.assignment case — confirm
            # intentional.
            if sValue == '+':
                if utils.are_previous_consecutive_token_types_ignoring_whitespace([parser.open_parenthesis], iToken - 1, lTokens):
                    lTokens[iToken] = sign.plus()
                elif utils.are_previous_consecutive_token_types_ignoring_whitespace([parser.keyword], iToken - 1, lTokens):
                    lTokens[iToken] = sign.plus()
                else:
                    lTokens[iToken] = adding_operator.plus()
                continue
            if sValue == '-':
                if utils.are_previous_consecutive_token_types_ignoring_whitespace([parser.open_parenthesis], iToken - 1, lTokens):
                    lTokens[iToken] = sign.minus()
                elif utils.are_previous_consecutive_token_types_ignoring_whitespace([parser.keyword], iToken - 1, lTokens):
                    lTokens[iToken] = sign.minus()
                else:
                    lTokens[iToken] = adding_operator.minus()
                continue
def add_enclosing_parens(oViolation):
    """Surround the violation's token list with an opening and a closing
    parenthesis."""
    lViolationTokens = oViolation.get_tokens()
    rules_utils.insert_token(lViolationTokens, 0, parser.open_parenthesis())
    lViolationTokens += [parser.close_parenthesis()]
    oViolation.set_tokens(lViolationTokens)
def _fix_violation(self, oViolation):
    """Fix the violation by enclosing its tokens in parentheses."""
    lWrapped = oViolation.get_tokens()
    # In-place front insert / back extend, then store the result.
    lWrapped[0:0] = [parser.open_parenthesis()]
    lWrapped += [parser.close_parenthesis()]
    oViolation.set_tokens(lWrapped)