Example #1
    def analyze(self, oFile):
        lToi = []
        lPrevious = []
        for lSequence in self.lSequences:
            if lSequence[0] not in lPrevious:
                aToi = oFile.get_tokens_bounded_by(lSequence[0], self.oLeftToken)
                lToi = utils.combine_two_token_class_lists(lToi, aToi)
            lPrevious.append(lSequence[0])

        for oToi in lToi:
            lTokens = oToi.get_tokens()
            for iToken, oToken in enumerate(lTokens):
                bFound = False
                for lSequence in self.lSequences:
                    if isinstance(oToken, lSequence[0]):
                        if utils.are_next_consecutive_token_types(lSequence, iToken, lTokens):
                            bFound = True
                            break
                        if utils.are_next_consecutive_token_types(lSequence[:-1], iToken, lTokens):
                            dAction = {}
                            dAction['num_tokens'] = len(lSequence) - 1
                            break
                        if utils.are_next_consecutive_token_types(lSequence[:-2], iToken, lTokens):
                            dAction = {}
                            dAction['num_tokens'] = len(lSequence) - 2
                            break
                            
                if bFound:
                    break
            else:
                sSolution = self.solution
                oViolation = violation.New(oToi.get_line_number(), oToi, sSolution)
                oViolation.set_action(dAction)
                self.add_violation(oViolation)
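
Every example below leans on utils.are_next_consecutive_token_types to pattern-match a run of token types starting at a given index. The real helper lives in the project's utils module; the following is only a sketch of the behavior the call sites assume (the parameter name lTypes is mine):

def are_next_consecutive_token_types(lTypes, iIndex, lTokens):
    # True when lTokens[iIndex], lTokens[iIndex + 1], ... are instances
    # of the types in lTypes, in order; False if the list runs out first.
    if iIndex + len(lTypes) > len(lTokens):
        return False
    return all(isinstance(lTokens[iIndex + iOffset], oType)
               for iOffset, oType in enumerate(lTypes))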
Example #2
    def _analyze(self, lToi):
        for oToi in lToi:
            lTokens = oToi.get_tokens()

            if utils.find_carriage_return(
                    lTokens) is None and self.allow_single_line:
                for oSplitToken in self.lSplitTokens:
                    if utils.count_token_types_in_list_of_tokens(
                            oSplitToken, lTokens) > 1:
                        break
                else:
                    continue

            iLine = oToi.get_line_number()
            for iToken, oToken in enumerate(lTokens):
                iLine = utils.increment_line_number(iLine, oToken)
                for oSplitToken in self.lSplitTokens:
                    if isinstance(oToken, oSplitToken):
                        if utils.are_next_consecutive_token_types(
                            [parser.whitespace, parser.comment], iToken + 1,
                                lTokens):
                            continue
                        if utils.are_next_consecutive_token_types(
                            [parser.comment], iToken + 1, lTokens):
                            continue
                        if utils.are_next_consecutive_token_types(
                            [parser.carriage_return], iToken + 1, lTokens):
                            continue
                        oViolation = violation.New(
                            iLine, oToi.extract_tokens(iToken, iToken),
                            self.solution)
                        self.add_violation(oViolation)
                        break
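
Example #2 also calls utils.find_carriage_return and utils.count_token_types_in_list_of_tokens. Plausible sketches that match how the results are used above (not the project's actual code):

def find_carriage_return(lTokens):
    # Index of the first carriage return, or None for a single-line span.
    for iToken, oToken in enumerate(lTokens):
        if isinstance(oToken, parser.carriage_return):
            return iToken
    return None

def count_token_types_in_list_of_tokens(oType, lTokens):
    # Number of tokens in lTokens that are instances of oType.
    return sum(1 for oToken in lTokens if isinstance(oToken, oType))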
Example #3
def _analyze_require_blank_line(self, lToi):
    sSolution = 'Insert blank line above *begin* keyword'
    for oToi in lToi:
        lTokens = oToi.get_tokens()
        iLine = oToi.get_line_number() + utils.count_carriage_returns(lTokens)
        lTokens.reverse()
        if utils.are_next_consecutive_token_types_ignoring_whitespace(
            [token.begin_keyword, token.is_keyword], 0, lTokens):
            continue
        if utils.are_next_consecutive_token_types_ignoring_whitespace(
            [token.begin_keyword, token.close_parenthesis], 0, lTokens):
            continue
        if utils.are_next_consecutive_token_types_ignoring_whitespace(
            [token.begin_keyword, token.process_keyword], 0, lTokens):
            continue
        if utils.are_next_consecutive_token_types([
                token.begin_keyword, parser.whitespace, parser.carriage_return,
                parser.blank_line
        ], 0, lTokens):
            continue
        if utils.are_next_consecutive_token_types(
            [token.begin_keyword, parser.carriage_return, parser.blank_line],
                0, lTokens):
            continue
        dAction = {}
        dAction['action'] = 'Insert'
        if isinstance(lTokens[1], parser.whitespace):
            dAction['index'] = len(lTokens) - 2
        else:
            dAction['index'] = len(lTokens) - 1
        lTokens.reverse()
        oViolation = violation.New(iLine, oToi, sSolution)
        oViolation.set_action(dAction)
        self.add_violation(oViolation)
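
Example #3 computes the line number of the *begin* keyword by adding utils.count_carriage_returns to the starting line of the token span. A sketch consistent with that usage, assuming one carriage-return token per line break:

def count_carriage_returns(lTokens):
    # Each carriage return advances the line counter by one.
    return sum(1 for oToken in lTokens if isinstance(oToken, parser.carriage_return))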
Example #4
 def analyze(self, oFile):
     lToi = oFile.get_tokens_bounded_by(token.process_keyword, token.begin_keyword)
     for oToi in lToi:
         lTokens = oToi.get_tokens()
         iLine = oToi.get_line_number() + utils.count_carriage_returns(lTokens)
         lTokens.reverse()
         if utils.are_next_consecutive_token_types_ignoring_whitespace([token.begin_keyword, token.is_keyword], 0, lTokens):
             continue
         if utils.are_next_consecutive_token_types_ignoring_whitespace([token.begin_keyword, token.close_parenthesis], 0, lTokens):
             continue
         if utils.are_next_consecutive_token_types_ignoring_whitespace([token.begin_keyword, token.process_keyword], 0, lTokens):
             continue
         if utils.are_next_consecutive_token_types([token.begin_keyword, parser.whitespace, parser.carriage_return, parser.blank_line], 0, lTokens):
             continue
         if utils.are_next_consecutive_token_types([token.begin_keyword, parser.carriage_return, parser.blank_line], 0, lTokens):
             continue
         dAction = {}
         if isinstance(lTokens[1], parser.whitespace):
             dAction['insert'] = len(lTokens) - 2
         else:
             dAction['insert'] = len(lTokens) - 1
         lTokens.reverse()
         oViolation = violation.New(iLine, oToi, self.solution)
         oViolation.set_action(dAction)
         self.add_violation(oViolation)
Example #5
    def _analyze(self, lToi):
        for oToi in lToi:
            lTokens = oToi.get_tokens()
            for iToken, oToken in enumerate(lTokens):
                for lSequence in self.lSequences:
                    bFound = False
                    if isinstance(oToken, lSequence[0]):
                        if utils.are_next_consecutive_token_types(
                                lSequence, iToken, lTokens):
                            bFound = True
                            break
                        if utils.are_next_consecutive_token_types(
                                lSequence[:-1], iToken, lTokens):
                            dAction = {}
                            dAction['num_tokens'] = len(lSequence) - 1
                        elif utils.are_next_consecutive_token_types(
                                lSequence[:-2], iToken, lTokens):
                            dAction = {}
                            dAction['num_tokens'] = len(lSequence) - 2

                if bFound:
                    break
            else:
                sSolution = self.solution
                oViolation = violation.New(oToi.get_line_number(), oToi,
                                           sSolution)
                oViolation.set_action(dAction)
                oViolation.set_remap()
                oViolation.fix_blank_lines = True
                self.add_violation(oViolation)
Example #6
def does_a_blank_line_exist(iToken, lTokens):
    if utils.are_next_consecutive_token_types([parser.whitespace, parser.comment, parser.carriage_return, parser.blank_line], iToken, lTokens):
        return True
    if utils.are_next_consecutive_token_types([parser.comment, parser.carriage_return, parser.blank_line], iToken, lTokens):
        return True
    if utils.are_next_consecutive_token_types([parser.carriage_return, parser.blank_line], iToken, lTokens):
        return True
    return False
Example #7
 def _analyze(self, lToi):
     for oToi in lToi:
         lTokens = oToi.get_tokens()
         if not utils.are_next_consecutive_token_types([parser.whitespace, self.token_to_move], 1, lTokens) and \
            not utils.are_next_consecutive_token_types([self.token_to_move], 1, lTokens):
             oViolation = violation.New(oToi.get_line_number(), oToi, self.solution)
             oViolation.set_remap()
             oViolation.fix_blank_lines = True
             self.add_violation(oViolation)
Example #8
    def _analyze(self, lToi):
        for oToi in lToi:
            lTokens = oToi.get_tokens()
            if utils.are_next_consecutive_token_types([parser.carriage_return], 1, lTokens):
                continue
            if utils.are_next_consecutive_token_types([parser.whitespace, parser.comment], 1, lTokens):
                continue
            if utils.are_next_consecutive_token_types([parser.comment], 1, lTokens):
                continue
            self.add_violation(violation.New(oToi.get_line_number(), oToi, self.solution))
Example #9
    def analyze(self, oFile):

        lToi = oFile.get_token_and_n_tokens_after_it_when_between_tokens(self.lTokens, 2, self.oStart, self.oEnd)
        for oToi in lToi:
            lTokens = oToi.get_tokens()
            if utils.are_next_consecutive_token_types([parser.carriage_return], 1, lTokens):
                continue
            if utils.are_next_consecutive_token_types([parser.whitespace, parser.comment, parser.carriage_return], 1, lTokens):
                continue
            if utils.are_next_consecutive_token_types([parser.comment, parser.carriage_return], 1, lTokens):
                continue
            self.violations.append(violation.New(oToi.get_line_number(), oToi, self.solution))
Example #10
    def analyze(self, oFile):

        lToi = oFile.get_tokens_bounded_by(self.token, parser.carriage_return)
        for oToi in lToi:
            lTokens = oToi.get_tokens()
            if utils.are_next_consecutive_token_types([parser.carriage_return], 1, lTokens):
                continue
            if utils.are_next_consecutive_token_types([parser.whitespace, parser.comment, parser.carriage_return], 1, lTokens):
                continue
            if utils.are_next_consecutive_token_types([parser.comment, parser.carriage_return], 1, lTokens):
                continue
            self.violations.append(violation.New(oToi.get_line_number(), oToi, self.solution))
Example #11
def _check_last_paren_new_line(self, oToi):

    if self.last_paren_new_line == 'ignore':
        return
    iLine, lTokens = utils.get_toi_parameters(oToi)
    lTokens.reverse()
    iLine = iLine + utils.count_carriage_returns(lTokens)
    bReturnFound = False
    bCommentFound = False
    for iToken, oToken in enumerate(lTokens):
        iLine = utils.decrement_line_number(iLine, oToken)
        if isinstance(oToken, parser.comment):
            bCommentFound = True
        if isinstance(oToken, parser.close_parenthesis):
            iEnd = len(lTokens) - iToken - 1
            if utils.are_next_consecutive_token_types(
                [parser.whitespace, parser.carriage_return], iToken + 1,
                    lTokens):
                bReturnFound = True
            elif utils.are_next_consecutive_token_types(
                [parser.carriage_return], iToken + 1, lTokens):
                bReturnFound = True

            lTokens.reverse()

            if self.last_paren_new_line == 'yes' and not bReturnFound:
                if self.move_last_comment == 'yes' and bCommentFound:
                    sSolution = 'Move parenthesis after assignment to the next line and trailing comment to previous line.'
                    oViolation = _create_violation(oToi, iLine, iEnd - 1,
                                                   len(lTokens) - 1,
                                                   'last_paren_new_line',
                                                   'insert_and_move_comment',
                                                   sSolution)
                    self.add_violation(oViolation)
                else:
                    sSolution = 'Move closing parenthesis to the next line.'
                    oViolation = _create_violation(oToi, iLine, iEnd - 1, iEnd,
                                                   'last_paren_new_line',
                                                   'insert', sSolution)
                    self.add_violation(oViolation)
            elif self.last_paren_new_line == 'no' and bReturnFound:
                sSolution = 'Move closing parenthesis to previous line.'
                iStart = utils.find_previous_non_whitespace_token(
                    iEnd - 1, lTokens)
                oViolation = _create_violation(oToi, iLine, iStart, iEnd,
                                               'last_paren_new_line', 'remove',
                                               sSolution)
                self.add_violation(oViolation)

            break
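
_create_violation is not shown in Example #11. Judging purely from its call sites, it bundles the offending token span and an action descriptor into a violation object; a hypothetical reconstruction (the parameter and dictionary key names are guesses):

def _create_violation(oToi, iLine, iStart, iEnd, sType, sAction, sSolution):
    # Extract the offending tokens and attach the action the fix phase reads.
    oViolation = violation.New(iLine, oToi.extract_tokens(iStart, iEnd), sSolution)
    oViolation.set_action({'type': sType, 'action': sAction})
    return oViolation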
Example #12
 def _analyze(self, lToi):
     for oToi in lToi:
         iLine, lTokens = utils.get_toi_parameters(oToi)
         if not utils.does_token_type_exist_in_list_of_tokens(self.oMoveToken, lTokens):
             continue
         dAction = {}
         bPassing = False
         for iToken, oToken in enumerate(lTokens):
             iLine = utils.increment_line_number(iLine, oToken)
             for oAnchorToken in self.lAnchorTokens:
                 if isinstance(oToken, oAnchorToken):
                     dAction['insert'] = iToken + 1
                     sAnchorToken = oToken.get_value()
                     iAnchorLine = iLine
                     if utils.are_next_consecutive_token_types([parser.whitespace, self.oMoveToken], iToken + 1, lTokens):
                         bPassing = True
                         break
                     elif isinstance(lTokens[iToken + 1], self.oMoveToken):
                         bPassing = True
                         break
             if isinstance(oToken, self.oMoveToken):
                 iAnchorLine = iLine
                 dAction['move_index'] = iToken
                 sSolution = 'Move "' + oToken.get_value() + '" on line ' + str(iLine) + ' to the right of "' + sAnchorToken + '" on line ' + str(iAnchorLine)
             if bPassing:
                 break
         else:
             oViolation = violation.New(iAnchorLine, oToi, sSolution)
             oViolation.set_action(dAction)
             oViolation.set_remap()
             self.add_violation(oViolation)
Example #13
    def _analyze(self, lToi):
        for oToi in lToi:
            lTokens = oToi.get_tokens()
            iLine = oToi.get_line_number()

            for iToken, oToken in enumerate(lTokens):
                if isinstance(oToken, parser.carriage_return):
                    iLine += 1
                    for oSearchToken in self.lTokens:
                        if utils.are_next_consecutive_token_types([parser.whitespace, oSearchToken], iToken + 1, lTokens) or \
                           utils.are_next_consecutive_token_types([oSearchToken], iToken + 1, lTokens):
                            oViolation = violation.New(iLine, oToi, self.solution)
                            dAction = {}
                            dAction['remove_to_index'] = iToken + 1
                            oViolation.set_action(dAction)
                            self.add_violation(oViolation)
Example #14
 def _analyze(self, lToi):
     for oToi in lToi:
         lTokens = oToi.get_tokens()
         if utils.are_next_consecutive_token_types([parser.carriage_return],
                                                   1, lTokens):
             continue
         if utils.are_next_consecutive_token_types(
             [parser.whitespace, parser.comment], 1, lTokens):
             continue
         if utils.are_next_consecutive_token_types([parser.comment], 1,
                                                   lTokens):
             continue
         for oToken in lTokens:
             if isinstance(oToken, self.oSameLineToken):
                 break
         else:
             self.add_violation(
                 violation.New(oToi.get_line_number(), oToi, self.solution))
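
Several examples (#1, #5, #12, #14) hinge on Python's for/else semantics: the else suite runs only when the loop finishes without hitting break. A minimal standalone illustration:

lValues = [1, 2, 3]
for iValue in lValues:
    if iValue == 2:
        break          # a match: the else suite is skipped
else:
    print('no match')  # runs only when the loop was never broken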
Example #15
 def _analyze(self, lToi):
     for oToi in lToi:
         lTokens = oToi.get_tokens()
         iLine = oToi.get_line_number()
         for iToken, oToken in enumerate(lTokens):
             if isinstance(oToken, parser.carriage_return):
                 iLine += 1
                 if utils.are_next_consecutive_token_types(
                     [parser.whitespace, self.oRemoveToken], iToken + 1,
                         lTokens):
                     oSubToi = oToi.extract_tokens(iToken, iToken + 2)
                     oViolation = violation.New(iLine, oSubToi,
                                                self.solution)
                     self.add_violation(oViolation)
                 if utils.are_next_consecutive_token_types(
                     [self.oRemoveToken], iToken + 1, lTokens):
                     oSubToi = oToi.extract_tokens(iToken, iToken + 1)
                     oViolation = violation.New(iLine, oSubToi,
                                                self.solution)
                     self.add_violation(oViolation)
Example #16
def _analyze_no_blank_line(self, lToi):
    sSolution = 'Remove blank line(s) above *begin* keyword'
    for oToi in lToi:
        lTokens = oToi.get_tokens()
        iLine = oToi.get_line_number() + utils.count_carriage_returns(lTokens)
        lTokens.reverse()
        if utils.are_next_consecutive_token_types_ignoring_whitespace(
            [token.begin_keyword, token.is_keyword], 0, lTokens):
            continue
        if utils.are_next_consecutive_token_types_ignoring_whitespace(
            [token.begin_keyword, token.close_parenthesis], 0, lTokens):
            continue
        if utils.are_next_consecutive_token_types_ignoring_whitespace(
            [token.begin_keyword, token.process_keyword], 0, lTokens):
            continue
        if not utils.are_next_consecutive_token_types([token.begin_keyword, parser.whitespace, parser.carriage_return, parser.blank_line], 0, lTokens) and \
           not utils.are_next_consecutive_token_types([token.begin_keyword, parser.carriage_return, parser.blank_line], 0, lTokens):
            continue
        dAction = {}
        dAction['action'] = 'Remove'

        if isinstance(lTokens[1], parser.whitespace):
            iEnd = len(lTokens) - 2
        else:
            iEnd = len(lTokens) - 3

        for iToken, oToken in enumerate(lTokens):
            if isinstance(oToken, parser.carriage_return):
                if not isinstance(lTokens[iToken + 1], parser.carriage_return):
                    iStart = len(lTokens) - iToken - 2
                    break

        lTokens.reverse()

        dAction['start'] = iStart
        dAction['end'] = iEnd
        oViolation = violation.New(iLine, oToi, sSolution)
        oViolation.set_action(dAction)
        self.add_violation(oViolation)
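
The len(lTokens) - 1 and len(lTokens) - 2 arithmetic in Examples #3, #4 and #16 maps indices in the reversed list back to positions in the original order. A runnable check of that mapping:

lTokens = ['a', 'b', 'c', 'd']
lReversed = list(reversed(lTokens))
# Reversed index i corresponds to original index len(lTokens) - 1 - i,
# so reversed position 1 is the second-to-last original token.
assert lReversed[1] == lTokens[len(lTokens) - 2]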
Example #17
    def _analyze(self, lToi):
        oToi = lToi[0]
        iLine, lTokens = utils.get_toi_parameters(oToi)
        for iToken, oToken in enumerate(lTokens[:-2]):

            iLine = utils.increment_line_number(iLine, oToken)

            if oToken.get_value().lower() == 'std_logic_vector':
                if utils.are_next_consecutive_token_types(
                    [parser.whitespace, parser.open_parenthesis], iToken + 1,
                        lTokens):
                    lExtractedTokens = oToi.extract_tokens(iToken, iToken + 1)
                    oViolation = violation.New(iLine, lExtractedTokens,
                                               self.solution)
                    self.add_violation(oViolation)
Example #18
    def analyze(self, oFile):
        lToi = oFile.get_tokens_bounded_by(self.left_token, self.right_token)
        for oToi in lToi:
            lTokens = oToi.get_tokens()
            iLine = oToi.get_line_number()
            iColumn = 0
            bTokenFound = False
            iToken = -1
            bSkip = False
            oEndSkipToken = None
            dAnalysis = {}

            for iIndex in range(0, len(lTokens)):
                iToken += 1
                oToken = lTokens[iIndex]

                bSkip, oEndSkipToken = check_for_exclusions(
                    oToken, bSkip, oEndSkipToken, self.lUnless)

                if not bTokenFound and not bSkip:
                    for oSearch in self.lTokens:
                        if isinstance(oToken, oSearch):
                            bTokenFound = True
                            dAnalysis[iLine] = {}
                            dAnalysis[iLine]['token_column'] = iColumn
                            dAnalysis[iLine]['token_index'] = iToken
                            dAnalysis[iLine]['line_number'] = iLine
                            dAnalysis[iLine]['token_value'] = oToken.get_value()
                            if isinstance(lTokens[iIndex - 1],
                                          parser.whitespace):
                                dAnalysis[iLine][
                                    'left_column'] = iColumn - len(
                                        lTokens[iIndex - 1].get_value())
                            else:
                                dAnalysis[iLine]['left_column'] = iColumn
                            break

                    iColumn += len(oToken.get_value())

                if isinstance(oToken, parser.carriage_return):
                    iLine += 1
                    iColumn = 0
                    bTokenFound = False
                    iToken = -1
                    if self.comment_line_ends_group:
                        if utils.are_next_consecutive_token_types([parser.whitespace, parser.comment], iIndex + 1, lTokens) or \
                           utils.are_next_consecutive_token_types([parser.comment], iIndex + 1, lTokens):
                            add_adjustments_to_dAnalysis(
                                dAnalysis, self.compact_alignment)

                            for iKey in list(dAnalysis.keys()):
                                if dAnalysis[iKey]['adjust'] != 0:
                                    oLineTokens = oFile.get_tokens_from_line(
                                        iKey)
                                    sSolution = 'Move ' + dAnalysis[iKey][
                                        'token_value'] + ' ' + str(
                                            dAnalysis[iKey]
                                            ['adjust']) + ' columns'
                                    oViolation = violation.New(
                                        oLineTokens.get_line_number(),
                                        oLineTokens, sSolution)
                                    oViolation.set_action(dAnalysis[iKey])
                                    self.add_violation(oViolation)

                            dAnalysis = {}
                    if self.blank_line_ends_group:
                        if utils.are_next_consecutive_token_types(
                            [parser.blank_line], iIndex + 1, lTokens):
                            add_adjustments_to_dAnalysis(
                                dAnalysis, self.compact_alignment)

                            for iKey in list(dAnalysis.keys()):
                                if dAnalysis[iKey]['adjust'] != 0:
                                    oLineTokens = oFile.get_tokens_from_line(
                                        iKey)
                                    sSolution = 'Move ' + dAnalysis[iKey][
                                        'token_value'] + ' ' + str(
                                            dAnalysis[iKey]
                                            ['adjust']) + ' columns'
                                    oViolation = violation.New(
                                        oLineTokens.get_line_number(),
                                        oLineTokens, sSolution)
                                    oViolation.set_action(dAnalysis[iKey])
                                    self.add_violation(oViolation)

                            dAnalysis = {}

                    if self.if_control_statements_ends_group:
                        if check_for_if_keywords(iIndex + 1, lTokens):
                            add_adjustments_to_dAnalysis(
                                dAnalysis, self.compact_alignment)

                            for iKey in list(dAnalysis.keys()):
                                if dAnalysis[iKey]['adjust'] != 0:
                                    oLineTokens = oFile.get_tokens_from_line(
                                        iKey)
                                    sSolution = 'Move ' + dAnalysis[iKey][
                                        'token_value'] + ' ' + str(
                                            dAnalysis[iKey]
                                            ['adjust']) + ' columns'
                                    oViolation = violation.New(
                                        oLineTokens.get_line_number(),
                                        oLineTokens, sSolution)
                                    oViolation.set_action(dAnalysis[iKey])
                                    self.add_violation(oViolation)

                            dAnalysis = {}

                    if self.case_control_statements_ends_group:
                        if check_for_case_keywords(iIndex + 1, lTokens):
                            add_adjustments_to_dAnalysis(
                                dAnalysis, self.compact_alignment)

                            for iKey in list(dAnalysis.keys()):
                                if dAnalysis[iKey]['adjust'] != 0:
                                    oLineTokens = oFile.get_tokens_from_line(
                                        iKey)
                                    sSolution = 'Move ' + dAnalysis[iKey][
                                        'token_value'] + ' ' + str(
                                            dAnalysis[iKey]
                                            ['adjust']) + ' columns'
                                    oViolation = violation.New(
                                        oLineTokens.get_line_number(),
                                        oLineTokens, sSolution)
                                    oViolation.set_action(dAnalysis[iKey])
                                    self.add_violation(oViolation)

                            dAnalysis = {}

            add_adjustments_to_dAnalysis(dAnalysis, self.compact_alignment)

            for iKey in list(dAnalysis.keys()):
                if dAnalysis[iKey]['adjust'] != 0:
                    oLineTokens = oFile.get_tokens_from_line(iKey)
                    sSolution = 'Move ' + dAnalysis[iKey][
                        'token_value'] + ' ' + str(
                            dAnalysis[iKey]['adjust']) + ' columns'
                    oViolation = violation.New(oLineTokens.get_line_number(),
                                               oLineTokens, sSolution)
                    oViolation.set_action(dAnalysis[iKey])
                    self.add_violation(oViolation)

            dAnalysis = {}
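
add_adjustments_to_dAnalysis is assumed by all of the alignment examples; the callers only read back an 'adjust' key per line. One plausible reading of the two-argument form used above (my sketch, not the project's implementation; Example #22 later passes two extra arguments this sketch ignores) computes a common target column and the per-line shift:

def add_adjustments_to_dAnalysis(dAnalysis, compact_alignment):
    if not dAnalysis:
        return
    if compact_alignment:
        # Pack each token one space right of the widest left-hand side.
        iTargetColumn = max(d['left_column'] for d in dAnalysis.values()) + 1
    else:
        # Only move tokens right, out to the right-most current position.
        iTargetColumn = max(d['token_column'] for d in dAnalysis.values())
    for dLine in dAnalysis.values():
        dLine['adjust'] = iTargetColumn - dLine['token_column']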
Example #19
    def analyze(self, oFile):

        lIncludeLines = []
        if not self.blank_line_ends_group:
            lIncludeLines.append(parser.blank_line)
        if not self.comment_line_ends_group:
            lIncludeLines.append(parser.comment)

        dAnalysis = {}

        lToi = oFile.get_tokens_bounded_by(self.left_token, self.right_token)
        for oToi in lToi:
            lTokens = oToi.get_tokens()
            iLine = oToi.get_line_number()
            iColumn = 0
            bTokenFound = False
            iToken = -1

            for iIndex in range(0, len(lTokens)):
               iToken += 1
               oToken = lTokens[iIndex]

               if not bTokenFound:
                   for oSearch in self.lTokens:
                       if isinstance(oToken, oSearch):
                           bTokenFound = True
                           dAnalysis[iLine] = {}
                           dAnalysis[iLine]['token_column'] = iColumn
                           dAnalysis[iLine]['token_index'] = iToken
                           dAnalysis[iLine]['line_number'] = iLine
                           if isinstance(lTokens[iIndex -1], parser.whitespace):
                               dAnalysis[iLine]['left_column'] = iColumn - len(lTokens[iIndex - 1].get_value())
                           else:
                               dAnalysis[iLine]['left_column'] = iColumn
                           break

                   iColumn += len(oToken.get_value())

               if isinstance(oToken, token.generic_clause.semicolon) and self.separate_generic_port_alignment:
                   add_adjustments_to_dAnalysis(dAnalysis, self.compact_alignment)
                   for iKey in list(dAnalysis.keys()):
                       if dAnalysis[iKey]['adjust'] != 0:
                           oLineTokens = oFile.get_tokens_from_line(iKey)
                           oViolation = violation.New(oLineTokens.get_line_number(), oLineTokens, self.solution)
                           oViolation.set_action(dAnalysis[iKey])
                           self.violations.append(oViolation)

                   dAnalysis = {}

               if isinstance(oToken, token.generic_map_aspect.close_parenthesis) and self.separate_generic_port_alignment:
                   add_adjustments_to_dAnalysis(dAnalysis, self.compact_alignment)
                   for iKey in list(dAnalysis.keys()):
                       if dAnalysis[iKey]['adjust'] != 0:
                           oLineTokens = oFile.get_tokens_from_line(iKey)
                           oViolation = violation.New(oLineTokens.get_line_number(), oLineTokens, self.solution)
                           oViolation.set_action(dAnalysis[iKey])
                           self.violations.append(oViolation)

                   dAnalysis = {}

               if isinstance(oToken, parser.carriage_return):
                   iLine += 1
                   iColumn = 0
                   bTokenFound = False
                   iToken = -1
                   if self.comment_line_ends_group:
                       if utils.are_next_consecutive_token_types([parser.whitespace, parser.comment], iIndex + 1, lTokens) or \
                          utils.are_next_consecutive_token_types([parser.comment], iIndex + 1, lTokens):
                           add_adjustments_to_dAnalysis(dAnalysis, self.compact_alignment)
                           for iKey in list(dAnalysis.keys()):
                               if dAnalysis[iKey]['adjust'] != 0:
                                   oLineTokens = oFile.get_tokens_from_line(iKey)
                                   oViolation = violation.New(oLineTokens.get_line_number(), oLineTokens, self.solution)
                                   oViolation.set_action(dAnalysis[iKey])
                                   self.violations.append(oViolation)

                           dAnalysis = {}

                   if self.blank_line_ends_group:
                       if utils.are_next_consecutive_token_types([parser.blank_line], iIndex + 1, lTokens):
                           add_adjustments_to_dAnalysis(dAnalysis, self.compact_alignment)

                           for iKey in list(dAnalysis.keys()):
                               if dAnalysis[iKey]['adjust'] != 0:
                                   oLineTokens = oFile.get_tokens_from_line(iKey)
                                   oViolation = violation.New(oLineTokens.get_line_number(), oLineTokens, self.solution)
                                   oViolation.set_action(dAnalysis[iKey])
                                   self.violations.append(oViolation)

                           dAnalysis = {}

            add_adjustments_to_dAnalysis(dAnalysis, self.compact_alignment)

            for iKey in list(dAnalysis.keys()):
                if dAnalysis[iKey]['adjust'] != 0:
                    oLineTokens = oFile.get_tokens_from_line(iKey)
                    oViolation = violation.New(oLineTokens.get_line_number(), oLineTokens, self.solution)
                    oViolation.set_action(dAnalysis[iKey])
                    self.violations.append(oViolation)

            dAnalysis = {}
Example #20
 def analyze(self, oFile):
     lToi = oFile.get_tokens_bounded_by(self.anchor_token, self.token_to_move)
     for oToi in lToi:
         lTokens = oToi.get_tokens()
         if not utils.are_next_consecutive_token_types([self.anchor_token, parser.whitespace, self.token_to_move], 0, lTokens):
             self.add_violation(violation.New(oToi.get_line_number(), oToi, self.solution)) 
Example #21
    def analyze(self, oFile):
        lPreToi = oFile.get_tokens_bounded_by(self.oStart, self.oEnd)
        lToi = []

        for oToi in lPreToi:
            iLine, lTokens = utils.get_toi_parameters(oToi)
            bInsideClockDef = False
            for iToken, oToken in enumerate(lTokens):
                iLine = utils.increment_line_number(iLine, oToken)

                if not bInsideClockDef:
                    if detect_clock_definition(iToken, oToken, lTokens):
                        bInsideClockDef = True
                        iStartIndex = iToken
                    continue

                if isinstance(oToken, token.if_statement.semicolon
                              ) and oToken.get_hierarchy() == 0:
                    lToi.append(oToi.extract_tokens(iStartIndex, iToken))
                    bInsideClockDef = False
                    continue

        ### jcl - need to figure out how to do this better without copying
        dAnalysis = {}
        for oToi in lToi:
            lTokens = oToi.get_tokens()
            iLine = oToi.get_line_number()
            iColumn = 0
            bTokenFound = False
            iToken = -1

            for iIndex in range(0, len(lTokens)):
                iToken += 1
                oToken = lTokens[iIndex]

                if not bTokenFound:
                    for oSearch in self.lTokens:
                        if isinstance(oToken, oSearch):
                            bTokenFound = True
                            dAnalysis[iLine] = {}
                            dAnalysis[iLine]['token_column'] = iColumn
                            dAnalysis[iLine]['token_index'] = iToken
                            dAnalysis[iLine]['line_number'] = iLine
                            if isinstance(lTokens[iIndex - 1],
                                          parser.whitespace):
                                dAnalysis[iLine][
                                    'left_column'] = iColumn - len(
                                        lTokens[iIndex - 1].get_value())
                            else:
                                dAnalysis[iLine]['left_column'] = iColumn
                            break

                    iColumn += len(oToken.get_value())

                if isinstance(oToken, parser.carriage_return):
                    iLine += 1
                    iColumn = 0
                    bTokenFound = False
                    iToken = -1
                    if self.comment_line_ends_group:
                        if utils.are_next_consecutive_token_types([parser.whitespace, parser.comment], iIndex + 1, lTokens) or \
                           utils.are_next_consecutive_token_types([parser.comment], iIndex + 1, lTokens):
                            add_adjustments_to_dAnalysis(
                                dAnalysis, self.compact_alignment)
                            for iKey in list(dAnalysis.keys()):
                                if dAnalysis[iKey]['adjust'] != 0:
                                    oLineTokens = oFile.get_tokens_from_line(
                                        iKey)
                                    oViolation = violation.New(
                                        oLineTokens.get_line_number(),
                                        oLineTokens, self.solution)
                                    oViolation.set_action(dAnalysis[iKey])
                                    self.add_violation(oViolation)

                            dAnalysis = {}

                    if self.blank_line_ends_group:
                        if utils.are_next_consecutive_token_types(
                            [parser.blank_line], iIndex + 1, lTokens):
                            add_adjustments_to_dAnalysis(
                                dAnalysis, self.compact_alignment)

                            for iKey in list(dAnalysis.keys()):
                                if dAnalysis[iKey]['adjust'] != 0:
                                    oLineTokens = oFile.get_tokens_from_line(
                                        iKey)
                                    oViolation = violation.New(
                                        oLineTokens.get_line_number(),
                                        oLineTokens, self.solution)
                                    oViolation.set_action(dAnalysis[iKey])
                                    self.add_violation(oViolation)

                            dAnalysis = {}

            add_adjustments_to_dAnalysis(dAnalysis, self.compact_alignment)

            for iKey in list(dAnalysis.keys()):
                if dAnalysis[iKey]['adjust'] != 0:
                    oLineTokens = oFile.get_tokens_from_line(iKey)
                    oViolation = violation.New(oLineTokens.get_line_number(),
                                               oLineTokens, self.solution)
                    oViolation.set_action(dAnalysis[iKey])
                    self.add_violation(oViolation)

            dAnalysis = {}
Example #22
    def analyze(self, oFile):
        lToi = oFile.get_tokens_bounded_by(self.left_token, self.right_token)
        for oToi in lToi:
            lTokens = oToi.get_tokens()
            iLine = oToi.get_line_number()
            iColumn = 0
            bTokenFound = False
            iToken = -1
            bSkip = False
            iMaxColumn = 0
            iLeftColumn = 0
            dAnalysis = {}

            for iIndex in range(0, len(lTokens)):
                iToken += 1
                oToken = lTokens[iIndex]
                iLeftColumn += oToken.length()

                if not bTokenFound and not bSkip:
                    for oSearch in self.lTokens:
                        if isinstance(oToken, oSearch):
                            bTokenFound = True
                            dAnalysis[iLine] = {}
                            dAnalysis[iLine]['token_column'] = iColumn
                            dAnalysis[iLine]['token_index'] = iToken
                            dAnalysis[iLine]['line_number'] = iLine
                            if isinstance(lTokens[iIndex - 1],
                                          parser.whitespace):
                                dAnalysis[iLine][
                                    'left_column'] = iColumn - len(
                                        lTokens[iIndex - 1].get_value())
                            else:
                                dAnalysis[iLine]['left_column'] = iColumn
                            break

                    iColumn += len(oToken.get_value())

                if isinstance(oToken, token.generic_clause.semicolon
                              ) and self.separate_generic_port_alignment:
                    add_adjustments_to_dAnalysis(dAnalysis,
                                                 self.compact_alignment)
                    for iKey in list(dAnalysis.keys()):
                        if dAnalysis[iKey]['adjust'] != 0:
                            oLineTokens = oFile.get_tokens_from_line(iKey)
                            sSolution = 'Move ' + str(
                                dAnalysis[iKey]['adjust']) + ' columns'
                            oViolation = violation.New(
                                oLineTokens.get_line_number(), oLineTokens,
                                sSolution)
                            oViolation.set_action(dAnalysis[iKey])
                            self.add_violation(oViolation)

                    dAnalysis = {}

                if isinstance(oToken,
                              token.generic_map_aspect.close_parenthesis
                              ) and self.separate_generic_port_alignment:
                    add_adjustments_to_dAnalysis(dAnalysis,
                                                 self.compact_alignment)
                    for iKey in list(dAnalysis.keys()):
                        if dAnalysis[iKey]['adjust'] != 0:
                            oLineTokens = oFile.get_tokens_from_line(iKey)
                            sSolution = 'Move ' + str(
                                dAnalysis[iKey]['adjust']) + ' columns'
                            oViolation = violation.New(
                                oLineTokens.get_line_number(), oLineTokens,
                                sSolution)
                            oViolation.set_action(dAnalysis[iKey])
                            self.add_violation(oViolation)

                    dAnalysis = {}

                if isinstance(oToken, parser.comment):
                    iLeftColumn -= lTokens[iIndex].length()
                    if isinstance(lTokens[iIndex - 1], parser.whitespace):
                        iLeftColumn -= lTokens[iIndex - 1].length()

                if isinstance(oToken, parser.carriage_return):

                    iMaxColumn = max(iMaxColumn, iLeftColumn)
                    iLeftColumn = 0

                    iLine += 1
                    iColumn = 0
                    bTokenFound = False
                    iToken = -1

                    if bSkip:
                        bSkip = False

                    for oSkip in self.lSkip:
                        if utils.are_next_consecutive_token_types([parser.whitespace, oSkip], iIndex + 1, lTokens) or \
                           utils.are_next_consecutive_token_types([oSkip], iIndex + 1, lTokens):
                            bSkip = True
                            break

                    if bSkip:
                        continue

                    if self.comment_line_ends_group:
                        if utils.are_next_consecutive_token_types([parser.whitespace, parser.comment], iIndex + 1, lTokens) or \
                           utils.are_next_consecutive_token_types([parser.comment], iIndex + 1, lTokens):
                            add_adjustments_to_dAnalysis(
                                dAnalysis, self.compact_alignment)

                            for iKey in list(dAnalysis.keys()):
                                if dAnalysis[iKey]['adjust'] != 0:
                                    oLineTokens = oFile.get_tokens_from_line(
                                        iKey)
                                    sSolution = 'Move ' + str(
                                        dAnalysis[iKey]['adjust']) + ' columns'
                                    oViolation = violation.New(
                                        oLineTokens.get_line_number(),
                                        oLineTokens, sSolution)
                                    oViolation.set_action(dAnalysis[iKey])
                                    self.add_violation(oViolation)

                            dAnalysis = {}
                    if self.blank_line_ends_group:
                        if utils.are_next_consecutive_token_types(
                            [parser.blank_line], iIndex + 1, lTokens):
                            add_adjustments_to_dAnalysis(
                                dAnalysis, self.compact_alignment)

                            for iKey in list(dAnalysis.keys()):
                                if dAnalysis[iKey]['adjust'] != 0:
                                    oLineTokens = oFile.get_tokens_from_line(
                                        iKey)
                                    sSolution = 'Move ' + str(
                                        dAnalysis[iKey]['adjust']) + ' columns'
                                    oViolation = violation.New(
                                        oLineTokens.get_line_number(),
                                        oLineTokens, sSolution)
                                    oViolation.set_action(dAnalysis[iKey])
                                    self.add_violation(oViolation)

                            dAnalysis = {}

            add_adjustments_to_dAnalysis(dAnalysis, self.compact_alignment,
                                         self.include_lines_without_comments,
                                         iMaxColumn)

            for iKey in list(dAnalysis.keys()):
                if dAnalysis[iKey]['adjust'] != 0:
                    oLineTokens = oFile.get_tokens_from_line(iKey)
                    sSolution = 'Move ' + str(
                        dAnalysis[iKey]['adjust']) + ' columns'
                    oViolation = violation.New(oLineTokens.get_line_number(),
                                               oLineTokens, sSolution)
                    oViolation.set_action(dAnalysis[iKey])
                    self.add_violation(oViolation)

            dAnalysis = {}
Example #23
    def analyze(self, oFile):

        lToi = oFile.get_tokens_bounded_by(self.left_token, self.right_token)
        for oToi in lToi:
            iLine, lTokens = utils.get_toi_parameters(oToi)
            dAnalysis = {}
            iColumn = 0
            iToken = -1
            iLeftColumn = 0
            bStartFound = False
            bEndFound = False

            for iIndex in range(0, len(lTokens)):
                iToken += 1
                oToken = lTokens[iIndex]

                if bStartFound:
                    if isinstance(oToken, parser.carriage_return):
                        if utils.are_next_consecutive_token_types([parser.whitespace, parser.comment], iIndex + 1, lTokens) or \
                           utils.are_next_consecutive_token_types([parser.comment], iIndex + 1, lTokens):
                            add_adjustments_to_dAnalysis(
                                dAnalysis, self.compact_alignment)
                            for iKey in list(dAnalysis.keys()):
                                if dAnalysis[iKey]['adjust'] != 0:
                                    oLineTokens = oFile.get_tokens_from_line(
                                        iKey)
                                    sSolution = 'Move ' + str(
                                        dAnalysis[iKey]['adjust']) + ' columns'
                                    oViolation = violation.New(
                                        oLineTokens.get_line_number(),
                                        oLineTokens, sSolution)
                                    oViolation.set_action(dAnalysis[iKey])
                                    self.add_violation(oViolation)
                            dAnalysis = {}
                            bStartFound = False
                            bEndFound = False

                            iLine += 1
                            iLeftColumn = 0
                            iColumn = 0
                            iToken = -1
                            continue

                    elif isinstance(oToken, parser.comment):
                        dAnalysis[iLine] = {}
                        dAnalysis[iLine]['token_column'] = iColumn
                        dAnalysis[iLine]['token_index'] = iToken
                        dAnalysis[iLine]['line_number'] = iLine
                        if isinstance(lTokens[iIndex - 1], parser.whitespace):
                            dAnalysis[iLine]['left_column'] = iColumn - len(
                                lTokens[iIndex - 1].get_value())
                        else:
                            dAnalysis[iLine]['left_column'] = iColumn

                if isinstance(oToken, parser.carriage_return):
                    iLine += 1
                    iLeftColumn = 0
                    iColumn = 0
                    iToken = -1
                else:
                    iLeftColumn += oToken.length()
                    iColumn += oToken.length()

                if bEndFound:
                    for oStartToken in self.lStart:
                        if isinstance(oToken, oStartToken):
                            bStartFound = False
                            bEndFound = False
                            break
                    else:
                        if not isinstance(oToken, parser.whitespace) and \
                           not isinstance(oToken, parser.comment) and \
                           not isinstance(oToken, parser.carriage_return):
                            add_adjustments_to_dAnalysis(
                                dAnalysis, self.compact_alignment)
                            for iKey in list(dAnalysis.keys()):
                                if dAnalysis[iKey]['adjust'] != 0:
                                    oLineTokens = oFile.get_tokens_from_line(
                                        iKey)
                                    sSolution = 'Move ' + str(
                                        dAnalysis[iKey]['adjust']) + ' columns'
                                    oViolation = violation.New(
                                        oLineTokens.get_line_number(),
                                        oLineTokens, sSolution)
                                    oViolation.set_action(dAnalysis[iKey])
                                    self.add_violation(oViolation)
                            dAnalysis = {}
                            bStartFound = False
                            bEndFound = False
                            continue

                if bStartFound:
                    for oEndToken in self.lEnd:
                        if isinstance(oToken, oEndToken):
                            bEndFound = True
                            break
                else:
                    for oStartToken in self.lStart:
                        if isinstance(oToken, oStartToken):
                            bStartFound = True
                            break
Example #24
    def _analyze(self, lToi):
        for oToi in lToi:
            iLine, lTokens = utils.get_toi_parameters(oToi)
            iColumn = 0
            bSignalFound = False
            bSkip = False
            dAnalysis = {}
            dTemp = {}
            iComma = 0  # initialize so the colon branch below cannot hit an unbound name
            for iToken, oToken in enumerate(lTokens):

                iLine = utils.increment_line_number(iLine, oToken)

                if isinstance(oToken, parser.carriage_return):
                    iColumn = 0
                else:
                    iColumn += len(oToken.get_value())

                bSkip = check_for_exclusions(bSkip, oToken, self.lUnless)
                if bSkip:
                    continue

                bSignalFound = check_for_signal_declaration(bSignalFound, oToken)
                if not bSignalFound:
                    iComma = 0
                    continue

                if isinstance(oToken, token.signal_declaration.colon):
                    bSignalFound = False
                    if iComma == 1:
                        dAnalysis[dTemp['line_number']] = dTemp
                    continue

                if isinstance(oToken, parser.comma):
                    iComma += 1
                    if iComma == 2:
                        bSignalFound = False
                        continue

                    dTemp = {}
                    dTemp['comma_column'] = iColumn
                    dTemp['comma_index'] = iToken
                    dTemp['line_number'] = iLine
                    if utils.are_next_consecutive_token_types([parser.whitespace, token.signal_declaration.identifier], iToken + 1, lTokens):
                        dTemp['identifier_column'] = iColumn + len(lTokens[iToken + 1].get_value())
                        dTemp['token_index'] = iToken + 2
                        dTemp['token_value'] = lTokens[iToken + 2].get_value()
                    elif utils.are_next_consecutive_token_types([token.signal_declaration.identifier], iToken + 1, lTokens):
                        dTemp['identifier_column'] = iColumn + 1
                        dTemp['token_index'] = iToken + 1
                        dTemp['token_value'] = lTokens[iToken + 1].get_value()
                    else:
                        bSignalFound = False

            add_adjustments_to_dAnalysis(dAnalysis, self.compact_alignment)

            for iKey in list(dAnalysis.keys()):
                if dAnalysis[iKey]['adjust'] != 0:
                    oLineTokens = oToi.extract_tokens(dAnalysis[iKey]['comma_index'], dAnalysis[iKey]['token_index'])
                    sSolution = 'Move ' + dAnalysis[iKey]['token_value'] + ' ' + str(dAnalysis[iKey]['adjust']) + ' columns'
                    oViolation = violation.New(dAnalysis[iKey]['line_number'], oLineTokens, sSolution)
                    oViolation.set_action(dAnalysis[iKey])
                    self.add_violation(oViolation)

            dAnalysis = {}
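
utils.get_toi_parameters appears in Examples #11, #12, #17, #21, #23 and #24, and the unpacking at every call site suggests it is a small convenience wrapper; a sketch under that assumption:

def get_toi_parameters(oToi):
    # (starting line number, token list) of a token-of-interest object.
    return oToi.get_line_number(), oToi.get_tokens()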