def get_tokens_bounded_by_token_when_between_tokens(
        oLeft,
        oRight,
        oStart,
        oEnd,
        lAllTokens,
        oTokenMap,
        include_trailing_whitespace=False):
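    # Collect every oLeft..oRight span that falls strictly inside an
    # oStart..oEnd region; when include_trailing_whitespace is set and the
    # span is followed by a whitespace token, that token is included as well.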

    lLeft, lRight = oTokenMap.get_token_pair_indexes(oLeft, oRight)
    lStart, lEnd = oTokenMap.get_token_pair_indexes(oStart, oEnd)
    lReturn = []
    for iStart, iEnd in zip(lStart, lEnd):
        for iLeft, iRight in zip(lLeft, lRight):
            if iStart < iLeft and iRight < iEnd:
                iLine = oTokenMap.get_line_number_of_index(iLeft)
                if include_trailing_whitespace:
                    if oTokenMap.is_token_at_index(parser.whitespace,
                                                   iRight + 1):
                        lReturn.append(
                            tokens.New(iLeft, iLine,
                                       lAllTokens[iLeft:iRight + 2]))
                    else:
                        lReturn.append(
                            tokens.New(iLeft, iLine,
                                       lAllTokens[iLeft:iRight + 1]))
                else:
                    lReturn.append(
                        tokens.New(iLeft, iLine, lAllTokens[iLeft:iRight + 1]))
    return lReturn
def get_tokens_at_beginning_of_line_matching_between_tokens(
        lTokens, oStart, oEnd, bInclusive, lAllTokens, oTokenMap):
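    # Return matches that start a line (directly after a carriage return, or
    # after a carriage return plus a single whitespace token) and that lie
    # between the oStart/oEnd pairs; bInclusive is forwarded to the
    # between-indexes check.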

    lIndexes = utils.get_indexes_of_token_list(lTokens, oTokenMap)

    lStart, lEnd = oTokenMap.get_token_pair_indexes(oStart, oEnd)

    lReturn = []
    for iIndex in lIndexes:
        if utils.is_index_between_indexes(iIndex,
                                          lStart,
                                          lEnd,
                                          bInclusive=bInclusive):
            if oTokenMap.is_token_at_index(parser.carriage_return, iIndex - 1):
                iLine = oTokenMap.get_line_number_of_index(iIndex)
                lReturn.append(tokens.New(iIndex, iLine, [lAllTokens[iIndex]]))
            elif oTokenMap.is_token_at_index(
                    parser.carriage_return,
                    iIndex - 2) and oTokenMap.is_token_at_index(
                        parser.whitespace, iIndex - 1):
                iLine = oTokenMap.get_line_number_of_index(iIndex)
                lReturn.append(
                    tokens.New(iIndex - 1, iLine,
                               lAllTokens[iIndex - 1:iIndex + 1]))

    return lReturn
    def test_get_violations_w_vsg_output_method(self):
        oRule = rule.Rule('xyz', '001')
        oRule.solution = 'Solution'

        self.assertFalse(oRule.has_violations())

        oToken = parser.item('first')
        oTokens = tokens.New(0, 1, [oToken])

        oViolation = violation.New(1, oTokens, 'First')
        oRule.add_violation(oViolation)

        oToken = parser.item('second')
        oTokens = tokens.New(1, 2, [oToken])

        oViolation = violation.New(2, oTokens, 'Second')
        oRule.add_violation(oViolation)

        oToken = parser.item('third')
        oTokens = tokens.New(2, 3, [oToken])

        oViolation = violation.New(3, oTokens, 'Third')
        oRule.add_violation(oViolation)

        dActual = oRule.get_violations_at_linenumber(1)
        self.assertEqual('First', dActual[0]['solution'])

        dActual = oRule.get_violations_at_linenumber(2)
        self.assertEqual('Second', dActual[0]['solution'])
def get_line_preceeding_line(iLine, lAllTokens, iNumLines, oTokenMap,
                             bSkipComments=False):
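    # Return the line(s) iNumLines above iLine as a token group, slicing
    # between carriage returns; with bSkipComments the start index comes from
    # _get_start_index, which presumably skips over comment-only lines.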

    lCarriageReturns = oTokenMap.get_token_indexes(parser.carriage_return)

    if not bSkipComments:
        iAdjust = -2
        iStartIndex = iLine - iNumLines + iAdjust
        if iStartIndex < 0:
            iStart = 0
        else:
            iStart = lCarriageReturns[iStartIndex] + 1
        iEnd = lCarriageReturns[iLine + iAdjust]
        lTemp = lAllTokens[iStart:iEnd]
    #    print(f'{iLine} | {iStart} | {iEnd} | {lTemp}')
        return tokens.New(iStart, iLine, lTemp)
    else:

        iStartIndex = _get_start_index(lCarriageReturns, iLine, oTokenMap)

        if iStartIndex == 0:
            iTokenStartIndex = 0
            iTokenEndIndex = lCarriageReturns[iTokenStartIndex]
        else:
            iTokenStartIndex = lCarriageReturns[iStartIndex - 1] + 1
            iTokenEndIndex = lCarriageReturns[iStartIndex]

        iLine = iStartIndex + 2
        lTokens = lAllTokens[iTokenStartIndex:iTokenEndIndex]
        return tokens.New(iTokenStartIndex, iLine, lTokens)
def get_sequence_of_tokens_matching_bounded_by_tokens(lTokens, oStart, oEnd,
                                                      lAllTokens, oTokenMap):
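    # Search each oStart/oEnd region for the exact token sequence lTokens;
    # the for/else only records a match when every token in the sequence
    # matched (no break was taken).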

    lReturn = []

    lStart = oTokenMap.get_token_indexes(oStart)
    lEnd = oTokenMap.get_token_indexes(oEnd)
    lIndexes = []
    for iStart, iEnd in zip(lStart, lEnd):
        lIndexes.extend(
            oTokenMap.get_token_indexes_between_indexes(
                lTokens[0], iStart, iEnd))

    lIndexes.sort()

    for iIndex in lIndexes:
        iLine = oTokenMap.get_line_number_of_index(iIndex)
        for iToken, oToken in enumerate(lTokens):
            if not isinstance(lAllTokens[iToken + iIndex], oToken):
                break
        else:
            lReturn.append(
                tokens.New(iIndex, iLine,
                           lAllTokens[iIndex:iIndex + len(lTokens)]))

    return lReturn
def get_tokens_between_tokens_inclusive_while_storing_value_from_token(
        left_token, right_token, value_token, lAllTokens, oTokenMap):
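    # For each left/right span, also store the value of the nearest
    # value_token occurring before the span's end (located with bisect); a
    # value that belongs to an earlier span is dropped and sValue stays None.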

    lReturn = []

    lStart, lEnd = oTokenMap.get_token_pair_indexes(left_token, right_token)

    lValueIndexes = oTokenMap.get_token_indexes(value_token)

    iPreviousEnd = 0
    for iStart, iEnd in zip(lStart, lEnd):
        sValue = None

        iValueIndex = bisect.bisect_left(lValueIndexes, iEnd) - 1
        if iValueIndex >= 0:
            iValue = lValueIndexes[iValueIndex]

            oValueToken = lAllTokens[iValue]
            if iValue < iPreviousEnd:
                sValue = None
            else:
                sValue = oValueToken.get_value()
        else:
            sValue = None

        iLine = oTokenMap.get_line_number_of_index(iStart)

        oTokens = tokens.New(iStart, iLine, lAllTokens[iStart:iEnd + 1])
        oTokens.set_token_value(sValue)
        lReturn.append(oTokens)
        iPreviousEnd = iEnd

    return lReturn
def get_tokens_bounded_by(oStart, oEnd, lAllObjects, oTokenMap,
                          include_trailing_whitespace=False,
                          bExcludeLastToken=False,
                          bIncludeTillEndOfLine=False,
                          bIncludeTillBeginningOfLine=False):
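    # Core extraction routine: for every oStart/oEnd pair, optionally widen
    # the span to the beginning/end of its line, drop the last token, or
    # absorb trailing whitespace, then emit a token group whose stored value
    # is the offset of the end token within the slice.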

#    oTokenMap.pretty_print()
    lReturn = []

    lStart, lEnd = oTokenMap.get_token_pair_indexes(oStart, oEnd)

    lCarriageReturns = oTokenMap.get_token_indexes(parser.carriage_return)

    lNewStart = []
    if bIncludeTillBeginningOfLine:
        for iStart in lStart:
            lNewStart.append(oTokenMap.get_index_of_carriage_return_before_index(iStart) + 1)
    else:
        lNewStart = lStart

    if bExcludeLastToken:
        lEnd = [x - 1 for x in lEnd]

    lNewEnd = []
    if bIncludeTillEndOfLine:
        for iEnd in lEnd:
            lNewEnd.append(oTokenMap.get_index_of_carriage_return_after_index(iEnd))
    else:
        lNewEnd = lEnd

#    print(lStart)
#    print(lEnd)
#    print(lNewEnd)

    lWhiteSpace = oTokenMap.get_token_indexes(parser.whitespace)
    if include_trailing_whitespace:
        for iNewEnd, iIndex in enumerate(lNewEnd):
            if iIndex + 1 in lWhiteSpace:
                lNewEnd[iNewEnd] += 1
    elif not bIncludeTillEndOfLine:
        for iNewEnd, iIndex in enumerate(lNewEnd):
            if iIndex in lWhiteSpace:
                lNewEnd[iNewEnd] -= 1
            if lNewEnd[iNewEnd] in lCarriageReturns:
                lNewEnd[iNewEnd] -= 1

#    print(lStart)
#    print(lEnd)
#    print(lNewEnd)

#    for i in range(0, len(lStart)):
#        iStart = lStart[i]
#        iEnd = lEnd[i]
#        iNewEnd = lNewEnd[i]
#        print(f'{lAllObjects[iStart]} | {lAllObjects[iEnd]} | {lAllObjects[iNewEnd]}')

    for iStart, iEnd, iIndex in zip(lNewStart, lNewEnd, lEnd):
        lTemp = lAllObjects[iStart: iEnd + 1]
        iStartLine = oTokenMap.get_line_number_of_index(iStart)
        oToi = tokens.New(iStart, iStartLine, lTemp)
        oToi.set_token_value(iIndex - iStart)
        lReturn.append(oToi)

    return lReturn
def get_blank_lines_below_line_ending_with_several_possible_tokens(
        lTokens, lAllTokens, oTokenMap):
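    # For each matching token that ends a line, collect the run of blank
    # lines immediately below it, stopping at the first carriage return that
    # is not followed by a blank_line token.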

    lReturn = []

    lIndexes = utils.get_indexes_of_tokens_between(lTokens[0], lTokens[1:],
                                                   oTokenMap)

    lCarriageReturns = oTokenMap.get_token_indexes(parser.carriage_return)
    lBlankLines = oTokenMap.get_token_indexes(parser.blank_line)

    for iIndex in lIndexes:
        if not utils.is_token_at_end_of_line(iIndex, oTokenMap):
            continue
        iLine = oTokenMap.get_line_number_of_index(iIndex)
        iStart = lCarriageReturns[iLine - 1] + 1
        for i in range(iLine - 1, len(lCarriageReturns)):
            iCarriageReturnIndex = lCarriageReturns[i]
            if iCarriageReturnIndex + 1 not in lBlankLines:
                iEnd = lCarriageReturns[i] + 1
                lTemp = lAllTokens[iStart:iEnd]
                if len(lTemp) > 0:
                    lReturn.append(tokens.New(iStart, iLine, lTemp))
                break

    return lReturn
def get_consecutive_lines_starting_with_token(search_token, min_num_lines,
                                              lAllTokens, oTokenMap):
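    # Group the line numbers that start with search_token into consecutive
    # runs and return a token group for every run spanning at least
    # min_num_lines lines, from the start of its first line to the end of
    # its last line.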

    lReturn = []
    lSearchIndexes = utils.filter_indexes_which_start_a_line(
        search_token, oTokenMap)

    lSearchLines = []
    for iSearchIndex in lSearchIndexes:
        lSearchLines.append(oTokenMap.get_line_number_of_index(iSearchIndex))

    iStart = None
    for iIndex, iLine in enumerate(lSearchLines):

        if iStart is None:
            iStart = iLine
            iStartLine = iLine
            iStartIndex = iIndex
            iCurrent = iLine
        else:
            if iLine == iCurrent + 1:
                iCurrent = iLine
                iEndIndex = iIndex
            else:
                if lSearchLines[iIndex - 1] - iStartLine >= min_num_lines - 1:
                    iStartToken = oTokenMap.get_index_of_carriage_return_before_index(
                        lSearchIndexes[iStartIndex]) + 1
                    iEndToken = oTokenMap.get_index_of_carriage_return_after_index(
                        lSearchIndexes[iEndIndex])
                    lTemp = lAllTokens[iStartToken:iEndToken]
                    lReturn.append(tokens.New(iStartToken, iStartLine, lTemp))
                iStart = iLine
                iStartLine = iLine
                iStartIndex = iIndex
                iCurrent = iLine

    if lSearchLines[iIndex - 1] - iStartLine >= min_num_lines - 1:
        iStartToken = oTokenMap.get_index_of_carriage_return_before_index(
            lSearchIndexes[iStartIndex]) + 1
        iEndToken = oTokenMap.get_index_of_carriage_return_after_index(
            lSearchIndexes[iEndIndex])
        lTemp = lAllTokens[iStartToken:iEndToken]
        lReturn.append(tokens.New(iStartToken, iStartLine, lTemp))

    return lReturn
def get_interface_elements_between_tokens(oStart, oEnd, lAllTokens, oTokenMap):
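    # Split the region between each oStart/oEnd pair into interface elements:
    # an element starts at the first non-whitespace, non-comment token and
    # ends before the terminating semicolon; trailing whitespace, carriage
    # returns and comments are stripped from the last element.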
    lReturn = []
    lStartIndexes, lEndIndexes = oTokenMap.get_token_pair_indexes(oStart, oEnd)

    for iStart, iEnd in zip(lStartIndexes, lEndIndexes):
        iLine = oTokenMap.get_line_number_of_index(iStart)
        lToi = lAllTokens[iStart + 1:iEnd + 1]

        bStore = False
        iLineNumber = None
        lTemp = []
        for iIndex in range(0, len(lToi)):
            oToken = lToi[iIndex]
            if (not isinstance(oToken, parser.whitespace)
                    and not isinstance(oToken, parser.carriage_return)
                    and not isinstance(oToken, parser.comment)
                    and not bStore):
                bStore = True
                iStartIndex = iIndex + iStart + 1
                iLineNumber = iLine

            if bStore:
                lTemp.append(lToi[iIndex])

            if isinstance(oToken, token.interface_list.semicolon):
                lTemp.pop()
                lReturn.append(tokens.New(iStartIndex, iLineNumber, lTemp))
                lTemp = []
                bStore = False

            if isinstance(lToi[iIndex], parser.carriage_return):
                iLine += 1

        if len(lTemp) > 0:
            lTemp.pop()
            for i in range(1, 5):
                if isinstance(lTemp[-1], parser.whitespace):
                    lTemp.pop()
                    continue
                if isinstance(lTemp[-1], parser.carriage_return):
                    lTemp.pop()
                    continue
                if isinstance(lTemp[-1], parser.comment):
                    lTemp.pop()
                    continue
            lReturn.append(tokens.New(iStartIndex, iLineNumber, lTemp))

    return lReturn
    def test_has_violations_method(self):
        oRule = rule.Rule()

        self.assertFalse(oRule.has_violations())

        oTokens = tokens.New(0, 0, [])

        oViolation = violation.New(0, oTokens, '')
        oRule.add_violation(oViolation)
        self.assertTrue(oRule.has_violations())
def get_tokens_from_line(iLineNumber, lAllTokens, oTokenMap):
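    # Slice out the tokens of line iLineNumber; iLineNumber - 2 indexes the
    # carriage return that ends the previous line, so the function assumes
    # iLineNumber >= 2.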
    lIndexes = oTokenMap.get_token_indexes(parser.carriage_return)
    iLine = iLineNumber - 2

    iStart = lIndexes[iLine] + 1
    iEnd = lIndexes[iLine + 1] + 1

    lTemp = lAllTokens[iStart:iEnd]

    return tokens.New(iStart, iLineNumber, lTemp)
def get_token_and_n_tokens_after_it(lTokens, iTokens, lAllTokens, oTokenMap):
    lReturn = []

    lIndexes = utils.get_indexes_of_token_list(lTokens, oTokenMap)

    for iIndex in lIndexes:
        iLine = oTokenMap.get_line_number_of_index(iIndex)
        iEnd = iIndex + iTokens + 1
        lReturn.append(tokens.New(iIndex, iLine, lAllTokens[iIndex:iEnd]))

    return lReturn
def get_tokens_at_beginning_of_line_matching_unless_between_tokens(
        lTokens, lUnless, lAllTokens, oTokenMap):
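    # Same as the between-tokens variant above, except indexes that fall
    # inside any of the lUnless regions appear to be filtered out first by
    # filter_indexes_in_unless_regions.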

    lIndexes = utils.get_indexes_of_token_list(lTokens, oTokenMap)
    lIndexes = filter_indexes_in_unless_regions(lIndexes, lUnless, oTokenMap)

    lReturn = []
    for iIndex in lIndexes:
        if oTokenMap.is_token_at_index(parser.carriage_return, iIndex - 1):
            iLine = oTokenMap.get_line_number_of_index(iIndex)
            lReturn.append(tokens.New(iIndex, iLine, [lAllTokens[iIndex]]))
        elif oTokenMap.is_token_at_index(parser.carriage_return, iIndex -
                                         2) and oTokenMap.is_token_at_index(
                                             parser.whitespace, iIndex - 1):
            iLine = oTokenMap.get_line_number_of_index(iIndex)
            lReturn.append(
                tokens.New(iIndex - 1, iLine,
                           lAllTokens[iIndex - 1:iIndex + 1]))

    return lReturn
def get_line_succeeding_line(iLine, lAllTokens, iNumLines, oTokenMap):
    lCarriageReturns = oTokenMap.get_token_indexes(parser.carriage_return)

    iStart = lCarriageReturns[iLine - 1] + 1
    try:
        iEnd = lCarriageReturns[iLine + iNumLines - 1]

        lTemp = lAllTokens[iStart:iEnd]

        return tokens.New(iStart, iLine + 1, lTemp)
    except IndexError:
        return None
def get_m_tokens_before_and_n_tokens_after_token(iM, iN, lTokens, lAllTokens,
                                                 oTokenMap):
    lReturn = []

    lIndexes = utils.get_indexes_of_token_list(lTokens, oTokenMap)

    for iIndex in lIndexes:
        iLine = oTokenMap.get_line_number_of_index(iIndex)
        iStart = iIndex - iM
        iEnd = iIndex + iN
        if start_index_exceeds_beginning_of_file(iStart):
            lMyTokens = []
            lMyTokens.append(parser.beginning_of_file())
            lMyTokens.extend(lAllTokens[0:iEnd + 1])
            oTokens = tokens.New(0, iLine, lMyTokens)
            lReturn.append(oTokens)
        else:
            lReturn.append(
                tokens.New(iStart, iLine, lAllTokens[iStart:iEnd + 1]))

    return lReturn
def get_token_and_n_tokens_before_it_in_between_tokens_unless_token_is_found(
        lTokens, iTokens, oStart, oEnd, oStop, lAllTokens, oTokenMap):
    lReturn = []

    lIndexes = utils.filter_tokens_between_tokens_unless_token_exists_between_them(
        lTokens, oStart, oEnd, oStop, oTokenMap)

    for iIndex in lIndexes:
        iStart = iIndex - iTokens
        if iStart >= 0:
            iLine = oTokenMap.get_line_number_of_index(iStart)
            lReturn.append(tokens.New(iStart, iLine, lAllTokens[iStart:iIndex + 1]))

    return lReturn
def get_token_and_n_tokens_before_it(lTokens, iTokens, lAllTokens, oTokenMap):
    lReturn = []

    lIndexes = utils.get_indexes_of_token_list(lTokens, oTokenMap)

    for iIndex in lIndexes:
        iLine = oTokenMap.get_line_number_of_index(iIndex)
        iStart = iIndex - iTokens
        if iStart >= 0:
            lReturn.append(
                tokens.New(iStart, iLine, lAllTokens[iStart:iIndex + 1]))
    return lReturn
    def test_add_violations_method(self):
        oRule = rule.Rule()
        self.assertEqual(oRule.violations, [])

        oTokens = tokens.New(0, 0, [])
        oViolation = violation.New(0, oTokens, '')

        oRule.add_violation(oViolation)
        self.assertEqual(len(oRule.violations), 1)
        oRule.add_violation(oViolation)
        oRule.add_violation(oViolation)
        self.assertEqual(len(oRule.violations), 3)
def get_sequence_of_tokens_not_matching(lTokens, lAllTokens, oTokenMap):
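    # For every occurrence of lTokens[0], emit a single-token group as soon
    # as any later token in the expected sequence fails to match.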

    lReturn = []
    lIndexes = oTokenMap.get_token_indexes(lTokens[0])

    for iIndex in lIndexes:
        for i in range(1, len(lTokens)):
            if not oTokenMap.is_token_at_index(lTokens[i], iIndex + i):
                iLine = oTokenMap.get_line_number_of_index(iIndex)
                lReturn.append(
                    tokens.New(iIndex, iLine, lAllTokens[iIndex:iIndex + 1]))
                break
    return lReturn
def get_m_tokens_before_and_n_tokens_after_token(iM, iN, lTokens, lAllTokens,
                                                 oTokenMap):
    lReturn = []

    lIndexes = utils.get_indexes_of_token_list(lTokens, oTokenMap)

    for iIndex in lIndexes:
        iLine = oTokenMap.get_line_number_of_index(iIndex)
        iStart = iIndex - iM
        iEnd = iIndex + iN
        if iStart >= 0:
            lReturn.append(tokens.New(iStart, iLine, lAllTokens[iStart:iEnd + 1]))

    return lReturn
def get_token_and_n_tokens_after_it_when_between_tokens(
        lTokens, iTokens, oStart, oEnd, lAllTokens, oTokenMap):
    lReturn = []

    lIndexes = utils.filter_tokens_between_tokens(lTokens, oStart, oEnd,
                                                  oTokenMap)

    for iIndex in lIndexes:
        iLine = oTokenMap.get_line_number_of_index(iIndex)
        iEnd = iIndex + iTokens + 1
        lReturn.append(tokens.New(iIndex, iLine, lAllTokens[iIndex:iEnd]))

    return lReturn
def get_association_elements_between_tokens(oStart, oEnd, lAllTokens,
                                            oTokenMap):
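    # Split each oStart/oEnd region into association elements: an element
    # begins at its formal part (or at the actual part when no formal part is
    # present) and runs through the terminating association-list comma.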
    lReturn = []
    lStartIndexes, lEndIndexes = oTokenMap.get_token_pair_indexes(oStart, oEnd)

    for iStart, iEnd in zip(lStartIndexes, lEndIndexes):
        iLine = oTokenMap.get_line_number_of_index(iStart)
        lTokens = lAllTokens[iStart:iEnd + 1]

        bStore = False
        iLineNumber = None
        lTemp = []
        for iToken, oToken in enumerate(lTokens):
            if isinstance(oToken, token.association_element.formal_part):
                bStore = True
                iStartIndex = iToken + iStart
                iLineNumber = iLine
            if isinstance(
                    oToken,
                    token.association_element.actual_part) and not bStore:
                bStore = True
                iStartIndex = iToken + iStart
                iLineNumber = iLine

            if bStore:
                lTemp.append(oToken)

            if isinstance(oToken, token.association_list.comma):
                lReturn.append(tokens.New(iStartIndex, iLineNumber, lTemp))
                lTemp = []
                bStore = False

            if isinstance(oToken, parser.carriage_return):
                iLine += 1

        if len(lTemp) > 0:
            lReturn.append(tokens.New(iStartIndex, iLineNumber, lTemp))

    return lReturn
    def test_get_solution(self):
        oRule = rule.Rule()

        oTokens = tokens.New(0, 0, [])
        oViolation = violation.New(0, oTokens, 'Solution Line 0')

        oRule.add_violation(oViolation)

        oViolation = violation.New(1, oTokens, 'Solution Line 1')
        oRule.add_violation(oViolation)

        self.assertEqual(oRule._get_solution(0), 'Solution Line 0')
        self.assertEqual(oRule._get_solution(1), 'Solution Line 1')
def get_tokens_matching(lTokens, lAllTokens, oTokenMap):

    lReturn = []
    lIndexes = []

    for oToken in lTokens:
        lIndexes.extend(oTokenMap.get_token_indexes(oToken))

    lIndexes.sort()

    for iIndex in lIndexes:
        iLine = oTokenMap.get_line_number_of_index(iIndex)
        lReturn.append(tokens.New(iIndex, iLine, [lAllTokens[iIndex]]))

    return lReturn
def get_tokens_between_non_whitespace_token_and_token(right_token, lAllTokens,
                                                      oTokenMap):

    lReturn = []

    lStart, lEnd = get_start_and_end_indexes(right_token, lAllTokens,
                                             oTokenMap)

    for iStart, iEnd in zip(lStart, lEnd):

        iLine = oTokenMap.get_line_number_of_index(iStart)

        oTokens = tokens.New(iStart, iLine, lAllTokens[iStart:iEnd + 1])
        lReturn.append(oTokens)

    return lReturn
def get_tokens_matching_in_range_bounded_by_tokens(lTokens, oStart, oEnd,
                                                   lAllTokens, oTokenMap):

    lStart, lEnd = oTokenMap.get_token_pair_indexes(oStart, oEnd)

    lIndexes = []
    for iStart, iEnd in zip(lStart, lEnd):
        for oToken in lTokens:
            lIndexes.extend(
                oTokenMap.get_token_indexes_between_indexes(
                    oToken, iStart, iEnd))

    lIndexes.sort()

    lReturn = []
    for iIndex in lIndexes:
        iLine = oTokenMap.get_line_number_of_index(iIndex)
        lReturn.append(tokens.New(iIndex, iLine, [lAllTokens[iIndex]]))

    return lReturn
def get_n_tokens_before_and_after_tokens(iToken, lTokens, lAllTokens,
                                         oTokenMap):
    lReturn = []
    lIndexes = []

    for oToken in lTokens:
        lIndexes.extend(oTokenMap.get_token_indexes(oToken))

    lIndexes.sort()

    for iIndex in lIndexes:
        iLine = oTokenMap.get_line_number_of_index(iIndex)
        lReturn.append(
            tokens.New(iIndex - iToken, iLine,
                       lAllTokens[iIndex - iToken:iIndex + iToken + 1]))

    return lReturn
def get_tokens_bounded_by_tokens_if_token_is_between_them(
        oStart, oEnd, oToken, lAllObjects, oTokenMap):
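    # Like get_tokens_bounded_by, but a span is only returned when oToken
    # occurs strictly between its start and end; the stored value is the
    # offset of that occurrence from the start of the span.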

    lReturn = []

    lStart, lEnd = oTokenMap.get_token_pair_indexes(oStart, oEnd)

    lIndexes = oTokenMap.get_token_indexes(oToken)

    for iStart, iEnd in zip(lStart, lEnd):
        for iIndex in lIndexes:
            if iStart < iIndex and iIndex < iEnd:
                lTemp = lAllObjects[iStart: iEnd + 1]
                iStartLine = oTokenMap.get_line_number_of_index(iStart)
                oToi = tokens.New(iStart, iStartLine, lTemp)
                oToi.set_token_value(iIndex - iStart)
                lReturn.append(oToi)

    return lReturn
def get_n_token_after_tokens(iToken, lTokens, lAllTokens, oTokenMap):
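    # From every occurrence of a token in lTokens, walk forward iToken
    # non-whitespace (and non-comment) tokens and return the single token
    # found at that position.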
    lReturn = []
    lIndexes = []
    for oToken in lTokens:
        lTemp = oTokenMap.get_token_indexes(oToken)
        for iTemp in lTemp:
            iTokenIndex = iTemp
            for iCount in range(0, iToken):
                iTokenIndex = oTokenMap.get_index_of_next_non_whitespace_token(
                    iTokenIndex, bExcludeComments=True)
            lIndexes.append(iTokenIndex)

    lIndexes.sort()

    for iIndex in lIndexes:
        iLine = oTokenMap.get_line_number_of_index(iIndex)
        lReturn.append(tokens.New(iIndex, iLine, [lAllTokens[iIndex]]))

    return lReturn