Example #1
    def _add_package_name_to_result(self, result: AbstractResult) -> None:
        LOGGER.debug(
            f'extracting package name from base result {result.scanned_file_name}...'
        )
        list_of_words = result.scanned_tokens
        for _, obj, following in self._gen_word_read_ahead(list_of_words):
            if obj == JavaParsingKeyword.PACKAGE.value:
                read_ahead_string = self.create_read_ahead_string(
                    obj, following)

                package_name = pp.Word(pp.alphanums +
                                       CoreParsingKeyword.DOT.value)
                expression_to_match = pp.Keyword(JavaParsingKeyword.PACKAGE.value) + package_name.setResultsName(JavaParsingKeyword.PACKAGE_NAME.value) + \
                    pp.FollowedBy(CoreParsingKeyword.SEMICOLON.value)

                try:
                    parsing_result = expression_to_match.parseString(
                        read_ahead_string)
                except Exception as some_exception:
                    result.analysis.statistics.increment(
                        Statistics.Key.PARSING_MISSES)
                    LOGGER.warning(
                        f'warning: could not parse result {result=}\n{some_exception}'
                    )
                    LOGGER.warning(f'next tokens: {obj} {following[:10]}')
                    continue

                result.module_name = parsing_result.package_name

                result.analysis.statistics.increment(
                    Statistics.Key.PARSING_HITS)
                LOGGER.debug(
                    f'package found: {parsing_result.package_name} and added to result'
                )
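A minimal standalone sketch of the same match, with the keyword enums replaced by what I assume their literal values to be ('package', '.', ';'):

import pyparsing as pp

package_name = pp.Word(pp.alphanums + '.')
expression_to_match = pp.Keyword('package') + \
    package_name.setResultsName('package_name') + \
    pp.FollowedBy(';')

result = expression_to_match.parseString('package com.example.app;')
print(result.package_name)  # com.example.app

The pp.FollowedBy(';') lookahead only confirms the terminating semicolon without consuming it, so malformed statements end up in the PARSING_MISSES branch above.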
Example #2
    def get_guild_role_parser(self, guild):
        """
        Create a role parser for the specified guild.

        :param guild: the guild the role parser is built for
        :return: a pyparsing element matching a complete role expression
        """
        intersect = pp.CaselessKeyword(self.INTERSECT_TOKEN)
        union = pp.CaselessKeyword(self.UNION_TOKEN)
        complement = pp.CaselessKeyword(self.COMPLEMENT_TOKEN)
        left_paren = pp.Literal(self.LEFT_PAREN_TOKEN)
        right_paren = pp.Literal(self.RIGHT_PAREN_TOKEN)
        role = pp.Word(pp.alphanums) | pp.QuotedString("'", escChar="\\")

        expression = pp.Forward()
        term = pp.Forward()
        factor = pp.Forward()
        factor <<= left_paren + expression + pp.FollowedBy(
            right_paren) + right_paren | complement + factor | role
        term <<= factor + intersect + term | factor
        expression <<= term + union + expression | term

        factor.setParseAction(self.get_guild_factor_action(guild))
        term.setParseAction(self.get_guild_term_action())
        expression.setParseAction(self.expression_action)

        role_statement = pp.StringStart() + expression + pp.StringEnd()

        return role_statement
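A standalone sketch of the same grammar shape, assuming INTERSECT_TOKEN, UNION_TOKEN and COMPLEMENT_TOKEN are 'and', 'or' and 'not', and leaving out the guild-specific parse actions:

import pyparsing as pp

intersect, union, complement = map(pp.CaselessKeyword, ('and', 'or', 'not'))
left_paren, right_paren = pp.Literal('('), pp.Literal(')')
role = pp.Word(pp.alphanums) | pp.QuotedString("'", escChar='\\')

expression, term, factor = pp.Forward(), pp.Forward(), pp.Forward()
factor <<= left_paren + expression + pp.FollowedBy(right_paren) + right_paren \
    | complement + factor | role
term <<= factor + intersect + term | factor
expression <<= term + union + expression | term

role_statement = pp.StringStart() + expression + pp.StringEnd()
print(role_statement.parseString("admin or (mods and not 'muted role')").asList())
# ['admin', 'or', '(', 'mods', 'and', 'not', 'muted role', ')']

The pp.FollowedBy(right_paren) checks that the closing parenthesis of a sub-expression is present before it is consumed.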
Example #3
    def generate_entity_results_from_analysis(self, analysis):
        LOGGER.debug('generating entity results...')
        filtered_results = {
            k: v
            for (k, v) in self.results.items()
            if v.analysis is analysis and isinstance(v, AbstractFileResult)
        }

        result: AbstractFileResult
        for _, result in filtered_results.items():

            entity_keywords: List[str] = [GroovyParsingKeyword.CLASS.value]
            entity_name = pp.Word(pp.alphanums)
            match_expression = pp.Keyword(GroovyParsingKeyword.CLASS.value) + \
                entity_name.setResultsName(CoreParsingKeyword.ENTITY_NAME.value) + \
                pp.Optional(pp.Keyword(GroovyParsingKeyword.EXTENDS.value) +
                            entity_name.setResultsName(CoreParsingKeyword.INHERITED_ENTITY_NAME.value)) + \
                pp.SkipTo(pp.FollowedBy(GroovyParsingKeyword.OPEN_SCOPE.value))

            comment_keywords: Dict[str, str] = {
                CoreParsingKeyword.LINE_COMMENT.value:
                GroovyParsingKeyword.INLINE_COMMENT.value,
                CoreParsingKeyword.START_BLOCK_COMMENT.value:
                GroovyParsingKeyword.START_BLOCK_COMMENT.value,
                CoreParsingKeyword.STOP_BLOCK_COMMENT.value:
                GroovyParsingKeyword.STOP_BLOCK_COMMENT.value
            }
            entity_results = result.generate_entity_results_from_scopes(
                entity_keywords, match_expression, comment_keywords)

            for entity_result in entity_results:
                self._add_inheritance_to_entity_result(entity_result)
                self._add_imports_to_entity_result(entity_result)
                self.create_unique_entity_name(entity_result)
                self._results[entity_result.unique_name] = entity_result
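The match expression in isolation, with the Groovy keyword enums replaced by their presumable literal values ('class', 'extends', '{'):

import pyparsing as pp

entity_name = pp.Word(pp.alphanums)
match_expression = pp.Keyword('class') + \
    entity_name.setResultsName('entity_name') + \
    pp.Optional(pp.Keyword('extends') +
                entity_name.setResultsName('inherited_entity_name')) + \
    pp.SkipTo(pp.FollowedBy('{'))

result = match_expression.parseString('class Foo extends Bar {')
print(result.entity_name, result.inherited_entity_name)  # Foo Bar

pp.SkipTo(pp.FollowedBy('{')) advances up to the opening brace without consuming it, so whatever sits between the inheritance clause and the scope opener is skipped.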
Example #4
class Tokens(object):
    # shared tokens
    delim_chars = '[]{},'
    pause = pp.FollowedBy(pp.Word(delim_chars) | pp.StringEnd())
    number = (pp.pyparsing_common.number + pause)
    quoted_string = pp.QuotedString('"', escChar='\\')
    true = (pp.Regex(r'(True|true|yes|on)') + pause). \
        setParseAction(lambda _: True)
    false = (pp.Regex(r'(False|false|no|off)') + pause). \
        setParseAction(lambda _: False)
    unquoted_string = pp.CharsNotIn(delim_chars). \
        setParseAction(lambda toks: toks[0].strip())
    empty_value = pp.Empty(). \
        setParseAction(lambda _: '')

    # tokens for configs
    identifier = pp.pyparsing_common.identifier.copy()
    comma = pp.Literal(',').suppress()
    assign = pp.Literal('=').suppress()
    config_value = (
        number | true | false | quoted_string | unquoted_string | empty_value)
    key_value_pair = (identifier + assign + config_value). \
        setParseAction(lambda toks: (toks[0], toks[1]))
    key_value_pair_list = pp.Optional(
        key_value_pair + pp.ZeroOrMore(comma + key_value_pair))

    # tokens for tags
    tag = quoted_string | unquoted_string | pp.Empty().suppress()
    tag_list = pp.Optional(tag + pp.ZeroOrMore(comma + tag))
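A quick exercise of the config tokens defined above:

result = Tokens.key_value_pair_list.parseString('retries=3, verbose=on, name="db main"')
print(result.asList())  # [('retries', 3), ('verbose', True), ('name', 'db main')]

The pause lookahead is what keeps number, true and false from matching a mere prefix of a longer unquoted string: they only win when a delimiter character or the end of input follows.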
Example #5
def _exec_form_grammar():
    start = "["
    end = "]"
    src = InstructionBase.pp_quoted(unquote=True).setResultsName("src", listAllMatches=True)
    following_src = pp.ZeroOrMore("," + src + ~pp.FollowedBy(end))
    dest = "," + InstructionBase.pp_quoted(unquote=True).setResultsName("dest")
    grammar = start + src + following_src + dest + end
    return grammar
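A sketch of how the negative lookahead splits sources from the destination; InstructionBase.pp_quoted is approximated here with pp.QuotedString (an assumption):

import pyparsing as pp

quoted = pp.QuotedString('"')
src = quoted.setResultsName('src', listAllMatches=True)
following_src = pp.ZeroOrMore(',' + src + ~pp.FollowedBy(']'))
dest = ',' + quoted.setResultsName('dest')
grammar = '[' + src + following_src + dest + ']'

r = grammar.parseString('["a.txt", "b.txt", "/dest/"]')
print(r.src.asList(), r.dest)  # ['a.txt', 'b.txt'] /dest/

~pp.FollowedBy(end) rejects any quoted word that sits directly before the closing bracket, which reserves the last element for dest.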
Example #6
class PortWithProfile(Node):
    """
    Variant of :class:`Port` that is used by "card" records inside
    the "Ports" property. It differs from the normal port syntax by having
    different entries inside the last section. Availability is not listed
    here, only priority. Priority does not have a colon before the actual
    number. This port is followed by profile assignment.
    """
    __fragments__ = {
        'name': 'port-name',
        'label': 'port-label',
        'priority': 'port-priority',
        'latency_offset': 'port-latency-offset',
        'availability': 'port-availability',
        'properties': lambda t: t['port-properties'].asList(),
        'profile_list': lambda t: t['port-profile-list'].asList(),
    }

    __syntax__ = (
        p.Word(p.alphanums + "-;").setResultsName('port-name') +
        p.Suppress(':')
        # This part was very tricky to write. The label is basically arbitrary
        # localized Unicode text. We want to grab all of it in one go but
        # without consuming the final '(' character or the space that comes
        # immediately before it.
        #
        # The syntax here combines a sequence of words, as defined by anything
        # other than a space and '(', delimited by a single whitespace.
        + p.Combine(
            p.OneOrMore(~p.FollowedBy(p.Regex(r'\(.+?\)') + p.LineEnd()) +
                        p.Regex(r'[^ \n]+') + p.White().suppress()),
            ' ').setResultsName('port-label') + p.Suppress('(') +
        p.Keyword('priority').suppress() + p.Optional(p.Suppress(':')) +
        p.Word(p.nums).setParseAction(lambda t: int(t[0])).setResultsName(
            'port-priority') + p.Optional(
                p.MatchFirst([
                    p.Suppress(',') + p.Keyword('latency offset:').suppress() +
                    p.Word(p.nums).setParseAction(lambda t: int(t[0])) +
                    p.Literal("usec").suppress(),
                    p.Empty().setParseAction(lambda t: '')
                ]).setResultsName('port-latency-offset')) + p.Optional(
                    p.MatchFirst([
                        p.Suppress(',') + p.Literal('not available'),
                        p.Suppress(',') + p.Literal('available'),
                        p.Empty().setParseAction(lambda t: '')
                    ]).setResultsName('port-availability')) + p.Suppress(')') +
        p.LineEnd().suppress() + p.Optional(
            p.MatchFirst([
                p.LineStart().suppress() + p.NotAny(p.White(' ')) +
                p.White('\t').suppress() + p.Keyword('Properties:').suppress()
                + p.LineEnd().suppress() + PropertyAttributeValue,
                p.Empty().setParseAction(lambda t: [])
            ]).setResultsName('port-properties')) +
        p.White('\t', max=3).suppress() +
        p.Literal("Part of profile(s)").suppress() + p.Suppress(":") +
        p.delimitedList(
            p.Word(p.alphanums + "+-:"),
            ", ").setResultsName("port-profile-list")).setResultsName("port")
Example #7
    def _add_imports_to_result(self, result: AbstractResult, analysis):
        LOGGER.debug(
            f'extracting imports from base result {result.scanned_file_name}...'
        )
        list_of_words_with_newline_strings = result.scanned_tokens
        source_string_no_comments = self._filter_source_tokens_without_comments(
            list_of_words_with_newline_strings,
            JavaScriptParsingKeyword.INLINE_COMMENT.value,
            JavaScriptParsingKeyword.START_BLOCK_COMMENT.value,
            JavaScriptParsingKeyword.STOP_BLOCK_COMMENT.value)
        filtered_list_no_comments = self.preprocess_file_content_and_generate_token_list_by_mapping(
            source_string_no_comments, self._token_mappings)

        for _, obj, following in self._gen_word_read_ahead(
                filtered_list_no_comments):
            if obj == JavaScriptParsingKeyword.IMPORT.value:
                read_ahead_string = self.create_read_ahead_string(
                    obj, following)

                valid_name = pp.Word(pp.alphanums +
                                     CoreParsingKeyword.AT.value +
                                     CoreParsingKeyword.DOT.value +
                                     CoreParsingKeyword.ASTERISK.value +
                                     CoreParsingKeyword.UNDERSCORE.value +
                                     CoreParsingKeyword.DASH.value +
                                     CoreParsingKeyword.SLASH.value)
                expression_to_match = pp.SkipTo(pp.Literal(JavaScriptParsingKeyword.FROM.value)) + pp.Literal(JavaScriptParsingKeyword.FROM.value) + \
                    pp.Suppress(pp.Literal(CoreParsingKeyword.SINGLE_QUOTE.value)) + \
                    pp.FollowedBy(pp.OneOrMore(valid_name.setResultsName(CoreParsingKeyword.IMPORT_ENTITY_NAME.value)))

                try:
                    parsing_result = expression_to_match.parseString(
                        read_ahead_string)
                except Exception as some_exception:
                    result.analysis.statistics.increment(
                        Statistics.Key.PARSING_MISSES)
                    LOGGER.warning(
                        f'warning: could not parse result {result=}\n{some_exception}'
                    )
                    LOGGER.warning(
                        f'next tokens: {[obj] + following[:AbstractParsingCore.Constants.MAX_DEBUG_TOKENS_READAHEAD.value]}'
                    )
                    continue

                analysis.statistics.increment(Statistics.Key.PARSING_HITS)

                # ignore any dependency substring from the config ignore list
                dependency = getattr(
                    parsing_result,
                    CoreParsingKeyword.IMPORT_ENTITY_NAME.value)
                if self._is_dependency_in_ignore_list(dependency, analysis):
                    LOGGER.debug(
                        f'ignoring dependency from {result.unique_name} to {dependency}'
                    )
                else:
                    result.scanned_import_dependencies.append(dependency)
                    LOGGER.debug(f'adding import: {dependency}')
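The import expression in isolation, with the keyword enums replaced by assumed literal values. The trailing pp.FollowedBy is a pure lookahead, yet recent pyparsing releases still report the results names set inside it, which is exactly what this code relies on:

import pyparsing as pp

valid_name = pp.Word(pp.alphanums + '@.*_-/')
expression_to_match = pp.SkipTo(pp.Literal('from')) + pp.Literal('from') + \
    pp.Suppress(pp.Literal("'")) + \
    pp.FollowedBy(pp.OneOrMore(valid_name.setResultsName('import_entity_name')))

r = expression_to_match.parseString("import { Component } from 'react-dom';")
print(r.import_entity_name)  # react-dom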
Example #8
def _build_assignments_grammar():
    key = InstructionBase.pp_word(
        without="=", unquote=True) + ~pp.FollowedBy(pp.White())
    value = InstructionBase.pp_word(unquote=True)
    assignment = (
        key +
        pp.Combine(pp.Suppress("=") +
                   pp.Optional(value, default=""))).setName("assignment")
    assignments = pp.OneOrMore(assignment).setParseAction(
        InstructionBase.dictionize)
    assignments = assignments.setResultsName("assignments")
    return assignments
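A behavioral sketch with stand-ins for the InstructionBase helpers (pp_word approximated by a Word over printables, dictionize by a zip into a dict; both assumptions):

import pyparsing as pp

key = pp.Word(pp.printables, excludeChars='=') + ~pp.FollowedBy(pp.White())
value = pp.Word(pp.printables, excludeChars='=')
assignment = key + pp.Combine(pp.Suppress('=') + pp.Optional(value, default=''))
assignments = pp.OneOrMore(assignment).setParseAction(
    lambda t: dict(zip(t.asList()[0::2], t.asList()[1::2])))

print(assignments.parseString('A=1 B= C=3')[0])
# {'A': '1', 'B': '', 'C': '3'}

~pp.FollowedBy(pp.White()) glues each key to its '=': a word followed by whitespace cannot be a key, and pp.Combine keeps the (possibly empty) value adjacent to the equals sign.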
Example #9
def _get_parser():
    double_escape = pp.Combine(pp.Literal(_DOUBLE_ESCAPE) + pp.MatchFirst([pp.FollowedBy(_REF_OPEN), pp.FollowedBy(_REF_CLOSE),
                               pp.FollowedBy(_INV_OPEN), pp.FollowedBy(_INV_CLOSE)])).setParseAction(pp.replaceWith(_ESCAPE))

    ref_open = pp.Literal(_REF_OPEN).suppress()
    ref_close = pp.Literal(_REF_CLOSE).suppress()
    ref_not_open = ~pp.Literal(_REF_OPEN) + ~pp.Literal(_REF_ESCAPE_OPEN) + ~pp.Literal(_REF_DOUBLE_ESCAPE_OPEN)
    ref_not_close = ~pp.Literal(_REF_CLOSE) + ~pp.Literal(_REF_ESCAPE_CLOSE) + ~pp.Literal(_REF_DOUBLE_ESCAPE_CLOSE)
    ref_escape_open = pp.Literal(_REF_ESCAPE_OPEN).setParseAction(pp.replaceWith(_REF_OPEN))
    ref_escape_close = pp.Literal(_REF_ESCAPE_CLOSE).setParseAction(pp.replaceWith(_REF_CLOSE))
    ref_text = pp.CharsNotIn(_REF_EXCLUDES) | pp.CharsNotIn(_REF_CLOSE_FIRST, exact=1)
    ref_content = pp.Combine(pp.OneOrMore(ref_not_open + ref_not_close + ref_text))
    ref_string = pp.MatchFirst([double_escape, ref_escape_open, ref_escape_close, ref_content]).setParseAction(_string)
    ref_item = pp.Forward()
    ref_items = pp.OneOrMore(ref_item)
    reference = (ref_open + pp.Group(ref_items) + ref_close).setParseAction(_reference)
    ref_item << (reference | ref_string)

    inv_open = pp.Literal(_INV_OPEN).suppress()
    inv_close = pp.Literal(_INV_CLOSE).suppress()
    inv_not_open = ~pp.Literal(_INV_OPEN) + ~pp.Literal(_INV_ESCAPE_OPEN) + ~pp.Literal(_INV_DOUBLE_ESCAPE_OPEN)
    inv_not_close = ~pp.Literal(_INV_CLOSE) + ~pp.Literal(_INV_ESCAPE_CLOSE) + ~pp.Literal(_INV_DOUBLE_ESCAPE_CLOSE)
    inv_escape_open = pp.Literal(_INV_ESCAPE_OPEN).setParseAction(pp.replaceWith(_INV_OPEN))
    inv_escape_close = pp.Literal(_INV_ESCAPE_CLOSE).setParseAction(pp.replaceWith(_INV_CLOSE))
    inv_text = pp.CharsNotIn(_INV_CLOSE_FIRST)
    inv_content = pp.Combine(pp.OneOrMore(inv_not_close + inv_text))
    inv_string = pp.MatchFirst([double_escape, inv_escape_open, inv_escape_close, inv_content]).setParseAction(_string)
    inv_items = pp.OneOrMore(inv_string)
    export = (inv_open + pp.Group(inv_items) + inv_close).setParseAction(_invquery)

    text = pp.CharsNotIn(_EXCLUDES) | pp.CharsNotIn('', exact=1)
    content = pp.Combine(pp.OneOrMore(ref_not_open + inv_not_open + text))
    string = pp.MatchFirst([double_escape, ref_escape_open, inv_escape_open, content]).setParseAction(_string)

    item = reference | export | string
    line = pp.OneOrMore(item) + pp.StringEnd()
    return line
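A tiny sketch of the double-escape rule with assumed delimiter constants (the real ones come from the module's configuration):

import pyparsing as pp

_ESCAPE = '\\'
_DOUBLE_ESCAPE = _ESCAPE * 2
_REF_OPEN, _REF_CLOSE = '${', '}'
double_escape = pp.Combine(
    pp.Literal(_DOUBLE_ESCAPE) +
    pp.MatchFirst([pp.FollowedBy(_REF_OPEN), pp.FollowedBy(_REF_CLOSE)])
).setParseAction(pp.replaceWith(_ESCAPE))

print(double_escape.parseString('\\\\${foo}').asList())
# a doubled escape right before '${' collapses into a single backslash

The FollowedBy lookaheads restrict the rewrite to doubled escapes sitting directly in front of a reference delimiter; backslashes anywhere else pass through untouched.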
Example #10
    def _add_inheritance_to_entity_result(self, result: AbstractEntityResult):
        LOGGER.debug(
            f'extracting inheritance from entity result {result.entity_name}...'
        )
        list_of_words = result.scanned_tokens
        for _, obj, following in self._gen_word_read_ahead(list_of_words):
            if obj == GroovyParsingKeyword.CLASS.value:
                read_ahead_string = self.create_read_ahead_string(
                    obj, following)

                entity_name = pp.Word(pp.alphanums)
                expression_to_match = pp.Keyword(GroovyParsingKeyword.CLASS.value) + entity_name.setResultsName(CoreParsingKeyword.ENTITY_NAME.value) + \
                    pp.Optional(pp.Keyword(GroovyParsingKeyword.EXTENDS.value) + entity_name.setResultsName(CoreParsingKeyword.INHERITED_ENTITY_NAME.value)) + \
                    pp.SkipTo(pp.FollowedBy(GroovyParsingKeyword.OPEN_SCOPE.value))

                try:
                    parsing_result = expression_to_match.parseString(
                        read_ahead_string)
                except Exception as some_exception:
                    result.analysis.statistics.increment(
                        Statistics.Key.PARSING_MISSES)
                    LOGGER.warning(
                        f'warning: could not parse result {result=}\n{some_exception}'
                    )
                    LOGGER.warning(f'next tokens: {obj} {following[:10]}')
                    continue

                if len(parsing_result) > 0:
                    inherited_entity_name = getattr(
                        parsing_result,
                        CoreParsingKeyword.INHERITED_ENTITY_NAME.value)
                    if inherited_entity_name:
                        result.analysis.statistics.increment(
                            Statistics.Key.PARSING_HITS)
                        LOGGER.debug(
                            f'found inheritance entity {inherited_entity_name} for entity name: {getattr(parsing_result, CoreParsingKeyword.ENTITY_NAME.value)} and added to result'
                        )
                        result.scanned_inheritance_dependencies.append(
                            inherited_entity_name)
Example #11
    def _attributeParser():
        # --- attribute parser ---
        attributeIndicator = p.LineStart() + p.Suppress(p.Literal('@'))
        attributeName = p.Word(p.alphanums).setResultsName('attributename')
        attributeSeparator = p.Suppress(p.Literal('::'))

        # TODO force case insensitivity in attributeMode keyword match
        # TODO add debug names
        # TODO add a conditional debug flag

        attributeMode = (p.Word(MODE_KEYWORD_SINGLE)
                         | p.Word(MODE_KEYWORD_MULTIPLE)).setResultsName(
                             'attributemode') + p.Literal(':').suppress()

        attributeType = (p.Word(
            p.alphanums).setResultsName('attributetype')).setParseAction(caps)

        attributePosargs = p.ZeroOrMore(
            (p.Word(p.alphanums)
             | p.Combine(p.Literal('[') + p.SkipTo(']') + p.Literal(']'))) +
            ~p.FollowedBy(p.Literal('=')) +
            p.Optional(p.Literal(',').suppress())).setResultsName('posargs')

        kwargprintables = p.printables.translate(
            str.maketrans('', '', '=,[]()'))

        attributeKwargs = p.ZeroOrMore(
            p.Group(
                p.Word(p.alphanums).setResultsName('keyword') +
                p.Literal('=').suppress() +
                (p.Word(kwargprintables) | p.Combine(
                    p.Literal('[').suppress() + p.SkipTo(']') +
                    p.Literal(']').suppress())).setResultsName('value') +
                p.Optional(
                    p.Literal(',').suppress()
                )  #TODO figure out how to make quotes work as enclosers instead of []
            )).setResultsName('kwargs')

        attributeArgs = (
            p.Literal('(').suppress() + attributePosargs + attributeKwargs +
            p.Literal(')').suppress()).setResultsName('attributeargs')

        attributeList = attributeIndicator + attributeName + attributeSeparator + \
                        attributeMode + attributeType + p.Optional(attributeArgs)
        return attributeList
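A runnable sketch; the mode keywords and the caps parse action are module-level names in the original, assumed here, and _attributeParser is called as a plain function:

import pyparsing as p

MODE_KEYWORD_SINGLE = 'single'
MODE_KEYWORD_MULTIPLE = 'multiple'

def caps(toks):
    return toks[0].upper()

attribute = _attributeParser()
r = attribute.parseString('@size:: single: int(42, unit=px)')
print(r.attributename, r.attributemode, r.attributetype)  # size single INT
print(r.posargs.asList(), r.kwargs[0].keyword, r.kwargs[0].value)  # ['42'] unit px

The ~p.FollowedBy(p.Literal('=')) inside attributePosargs stops positional-argument matching the moment a keyword argument begins.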
Example #12
    def attributeParser():
        # --- attribute parser ---
        attributeIndicator = p.LineStart() + p.Suppress(p.Literal('@'))
        attributeName = p.Word(p.alphanums).setResultsName('attributename')
        attributeSeparator = p.Suppress(p.Literal('::'))

        # TODO force case insensitivity in attributeMode keyword match
        # TODO add debug names
        # TODO add a conditional debug flag

        attributeMode = (p.Word(MODE_KEYWORD_SINGLE)
                         | p.Word(MODE_KEYWORD_MULTIPLE)).setResultsName(
                             'attributemode') + p.Literal(':').suppress()

        attributeType = (p.Word(
            p.alphanums).setResultsName('attributetype')).setParseAction(caps)

        attributePosargs = p.ZeroOrMore(
            (p.Word(p.alphanums)
             | p.Combine(p.Literal('[') + p.SkipTo(']') + p.Literal(']'))) +
            ~p.FollowedBy(p.Literal('=')) +
            p.Optional(p.Literal(',').suppress())).setResultsName('posargs')

        attributeKwargs = p.ZeroOrMore(
            p.Group(
                p.Word(p.alphanums).setResultsName('keyword') +
                p.Literal('=').suppress() +
                (p.Word(p.alphanums)
                 | p.Combine(p.Literal('[') + p.SkipTo(']') +
                             p.Literal(']'))).setResultsName('value') +
                p.Optional(p.Literal(',').suppress()))).setResultsName(
                    'kwargs')

        attributeArgs = (
            p.Literal('(').suppress() + attributePosargs + attributeKwargs +
            p.Literal(')').suppress()).setResultsName('attributeargs')

        attributeList = p.Group(attributeIndicator + attributeName +
                                attributeSeparator + attributeMode +
                                attributeType + p.Optional(attributeArgs))
        return attributeList
Example #13
    def generate_entity_results_from_analysis(self, analysis):
        LOGGER.debug('generating entity results...')
        filtered_results: Dict[str, FileResult] = {
            k: v
            for (k, v) in self.results.items()
            if v.analysis is analysis and isinstance(v, AbstractFileResult)
        }

        result: FileResult
        for _, result in filtered_results.items():

            entity_keywords: List[str] = [
                SwiftParsingKeyword.CLASS.value,
                SwiftParsingKeyword.STRUCT.value
            ]
            entity_name = pp.Word(pp.alphanums)

            match_expression = (pp.Keyword(SwiftParsingKeyword.CLASS.value) | pp.Keyword(SwiftParsingKeyword.STRUCT.value)) + \
                (~pp.Keyword(SwiftParsingKeyword.VAR.value) & ~pp.Keyword(SwiftParsingKeyword.FUNC.value)) + \
                entity_name.setResultsName(CoreParsingKeyword.ENTITY_NAME.value) + \
                pp.Optional(pp.Keyword(CoreParsingKeyword.COLON.value)) + pp.SkipTo(pp.FollowedBy(SwiftParsingKeyword.OPEN_SCOPE.value))

            comment_keywords: Dict[str, str] = {
                CoreParsingKeyword.LINE_COMMENT.value:
                SwiftParsingKeyword.INLINE_COMMENT.value,
                CoreParsingKeyword.START_BLOCK_COMMENT.value:
                SwiftParsingKeyword.START_BLOCK_COMMENT.value,
                CoreParsingKeyword.STOP_BLOCK_COMMENT.value:
                SwiftParsingKeyword.STOP_BLOCK_COMMENT.value
            }
            entity_results = result.generate_entity_results_from_scopes(
                entity_keywords, match_expression, comment_keywords)

            for entity_result in entity_results:
                LOGGER.debug(f'{entity_result.entity_name=}')
                self._add_inheritance_to_entity_result(entity_result)
                self._results[entity_result.entity_name] = entity_result

        self._add_imports_to_entity_results(analysis)
        self._add_extensions_to_entity_results(analysis)
Example #14
def make_parser():
    """Generate the pyparsing parser for hand strings."""
    ranks_str = ''.join(ranks)
    ranks_str += ranks_str.lower()
    suits_str = ''.join(suits)
    suits_str += suits_str.lower()
    suitedness = pyparsing.Word("os", exact=1).setName("suitedness")
    card = pyparsing.Word(ranks_str, suits_str, exact=2).setName("card")
    hand = card * 2
    hand.setParseAction(lambda s, loc, toks: ''.join(toks))
    digits = pyparsing.Word(pyparsing.nums)
    natural_number = pyparsing.Word('123456789', pyparsing.nums)
    decimal = natural_number ^ \
        (pyparsing.Optional(pyparsing.Literal('0')) +
         pyparsing.Literal('.') + digits) ^ \
        (natural_number + pyparsing.Literal('.') + digits) ^ \
        (natural_number + pyparsing.Literal('.'))
    decimal.setParseAction(lambda s, loc, toks: ''.join(toks))
    weight = pyparsing.Group(decimal +
                             pyparsing.Optional(pyparsing.Literal('%')))
    handtype = pyparsing.Word(ranks_str, exact=2) + \
        pyparsing.Optional(suitedness) + \
        ~pyparsing.FollowedBy(pyparsing.Literal('%') ^ pyparsing.Literal('('))
    handtype.setParseAction(lambda s, loc, toks: ''.join(toks))
    tag = pyparsing.Literal('#') + pyparsing.Word(pyparsing.alphanums + '_') \
        + pyparsing.Literal('#')
    handtype_group = pyparsing.Group(handtype
                                     ^ (handtype + pyparsing.Literal('-') +
                                        handtype)
                                     ^ (handtype + pyparsing.Literal('+'))
                                     ^ hand ^ tag)
    hand_group_list = pyparsing.Group(pyparsing.delimitedList(handtype_group))
    weighted_hand_group_list = pyparsing.Group((
        weight + pyparsing.Literal('(').suppress() + hand_group_list +
        pyparsing.Literal(')').suppress()) ^ hand_group_list)
    handrange = pyparsing.Optional(
        pyparsing.delimitedList(
            weighted_hand_group_list)) + pyparsing.StringEnd()
    return handrange
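A usage sketch; ranks and suits are module-level globals in the original, assumed here to be the usual card alphabets:

ranks = 'AKQJT98765432'
suits = 'shdc'

parser = make_parser()
print(parser.parseString('AKs, 55+, 99%(QJo-JTo)').asList())
# nested groups: the unweighted hand types, then the 99%-weighted group

The ~FollowedBy(Literal('%') ^ Literal('(')) on handtype matters because ranks include digits: without it a numeric weight such as 99 in '99%(...)' could be mistaken for the rank pair 99.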
Example #15
    def build_element(self):
        """
        A helper function to assemble the command parser's top level element.
        """
        command_elements = []

        for command in self.commands.values():
            command_elements.append(command.build_element())

        # Enforce command at string start
        element = pp.StringStart()

        # Attempt to match only the command name first, using a lookahead
        commands_element = pp.MatchFirst(pp.Keyword(c) for c in self.commands)
        element -= pp.FollowedBy(commands_element)

        # If the above lookahead element matches, the parser will then look for
        # one of the full command strings. Otherwise, it will stop immediately.
        element -= pp.MatchFirst(command_elements).setParseAction(self.on_match)

        # Enforce no extra arguments.
        element -= pp.StringEnd()

        return element
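A minimal sketch of the same pattern with made-up literal commands. The '-' operator chains the parts with an error stop: once the pp.FollowedBy lookahead has confirmed a known command name, later mismatches raise pp.ParseFatalException instead of silently backtracking:

import pyparsing as pp

commands = {'ping': None, 'echo': None}
command_elements = [pp.Keyword('ping'), pp.Keyword('echo') + pp.Word(pp.alphas)]

element = pp.StringStart()
element -= pp.FollowedBy(pp.MatchFirst(pp.Keyword(c) for c in commands))
element -= pp.MatchFirst(command_elements)
element -= pp.StringEnd()

print(element.parseString('echo hello').asList())  # ['echo', 'hello']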
Example #16
def parser_factory(styler):
    """Builds the repr() parser."""
    squo = styler('class:string', "'")
    dquo = styler('class:string', '"')

    esc_single = pp.oneOf(r'\\ \' \" \n \r \t')
    esc_hex = pp.Literal(r'\x') + pp.Word(pp.hexnums, exact=2)
    escs = styler('class:escape', esc_single | esc_hex)

    control_chars = ''.join(map(chr, range(32))) + '\x7f'
    normal_chars_squo = pp.CharsNotIn(control_chars + r"\'")
    chars_squo = styler('class:string', normal_chars_squo) | escs
    normal_chars_dquo = pp.CharsNotIn(control_chars + r'\"')
    chars_dquo = styler('class:string', normal_chars_dquo) | escs

    skip_white = pp.Optional(pp.White())
    bytes_prefix = pp.Optional(styler('class:string_prefix', 'b'))
    string_squo = skip_white + bytes_prefix + squo - pp.ZeroOrMore(
        chars_squo) + squo
    string_dquo = skip_white + bytes_prefix + dquo - pp.ZeroOrMore(
        chars_dquo) + dquo
    string = string_squo | string_dquo
    string.leaveWhitespace()

    address = styler('class:address', '0x' + pp.Word(pp.hexnums))
    number = styler('class:number', ppc.number)
    const = pp.oneOf('True False None NotImplemented Ellipsis ...')
    const = styler('class:constant', const)
    kwarg = styler('class:kwarg', ppc.identifier) + styler(
        'class:operator', '=')
    call = styler('class:call', ppc.identifier) + pp.FollowedBy('(')
    magic = styler('class:magic', pp.Regex(r'__[a-zA-Z0-9_]+__'))

    token = string | address | number | const | kwarg | call | magic
    token.parseWithTabs()
    return pp.originalTextFor(token)
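parser_factory only needs styler to wrap each fragment; a no-op stand-in (my assumption) is enough to exercise it:

import pyparsing as pp

def styler(style_class, expr):
    # ignore the style class; just normalize bare strings to parser elements
    return expr if isinstance(expr, pp.ParserElement) else pp.Literal(expr)

repr_parser = parser_factory(styler)
print(repr_parser.parseString('foo(').asList())  # ['foo']

The pp.FollowedBy('(') in the call token is what classifies an identifier as a call: the parenthesis is checked but never consumed.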
Example #17
def __get_rule_parser() -> pyparsing.ParserElement:
    # This function defines the rule grammar

    # How to add new rule expressions:
    #   1 Create new grammar rules in a way similar to existing rsc_expr and
    #     op_expr. Use setName for better description of a grammar when printed.
    #     Use setResultsName for easy access to parsed parts.
    #   2 Create new classes in expression_part module, probably one for each
    #     type of expression. Those are data containers holding the parsed data
    #     independent of the parser.
    #   3 Create builders for the new classes and connect them to created
    #     grammar rules using setParseAction.
    #   4 Add the new expressions into simple_expr definition.
    #   5 Test and debug the whole thing.

    node_attr_unary_expr = pyparsing.And([
        # operator
        pyparsing.Or([
            pyparsing.CaselessKeyword(op)
            for op in _token_to_node_expr_unary_op
        ]).setResultsName("operator"),
        # attribute name
        # It can be any string containing any characters except whitespace
        # (token separator) and "()" (brackets).
        pyparsing.Regex(r"[^\s()]+").setName("<attribute name>"
                                             ).setResultsName("attr_name"),
    ])
    node_attr_unary_expr.setParseAction(__build_node_attr_unary_expr)

    node_attr_binary_expr = pyparsing.And([
        # attribute name
        # It can be any string containing any characters except whitespace
        # (token separator) and "()" (brackets).
        pyparsing.Regex(r"[^\s()]+").setName("<attribute name>"
                                             ).setResultsName("attr_name"),
        # operator
        pyparsing.Or([
            pyparsing.CaselessKeyword(op)
            for op in _token_to_node_expr_binary_op
        ]).setResultsName("operator"),
        # attribute type
        pyparsing.Optional(
            pyparsing.Or([
                pyparsing.CaselessKeyword(type_)
                for type_ in _token_to_node_expr_type
            ])).setName("<attribute type>").setResultsName("attr_type"),
        # attribute value
        # It can be any string containing any characters except whitespace
        # (token separator) and "()" (brackets).
        pyparsing.Regex(r"[^\s()]+").setName("<attribute value>"
                                             ).setResultsName("attr_value"),
    ])
    node_attr_binary_expr.setParseAction(__build_node_attr_binary_expr)

    date_unary_expr = pyparsing.And([
        pyparsing.CaselessKeyword("date"),
        # operator
        pyparsing.Or([
            pyparsing.CaselessKeyword(op)
            for op in _token_to_date_expr_unary_op
        ]).setResultsName("operator"),
        # date
        # It can be any string containing any characters except whitespace
        # (token separator) and "()" (brackets).
        # The actual value should be validated elsewhere.
        pyparsing.Regex(r"[^\s()]+").setName("<date>").setResultsName("date"),
    ])
    date_unary_expr.setParseAction(__build_date_unary_expr)

    date_inrange_expr = pyparsing.And([
        pyparsing.CaselessKeyword("date"),
        pyparsing.CaselessKeyword("in_range"),
        # date
        # It can be any string containing any characters except whitespace
        # (token separator) and "()" (brackets).
        # The actual value should be validated elsewhere.
        # The Regex matches 'to'. In order to prevent that, FollowedBy is
        # used.
        pyparsing.Optional(
            pyparsing.And([
                pyparsing.Regex(r"[^\s()]+").setName(
                    "[<date>]").setResultsName("date1"),
                pyparsing.FollowedBy(pyparsing.CaselessKeyword("to")),
            ])),
        pyparsing.CaselessKeyword("to"),
        pyparsing.Or([
            # date
            # It can be any string containing any characters except
            # whitespace (token separator) and "()" (brackets).
            # The actual value should be validated elsewhere.
            pyparsing.Regex(r"[^\s()]+").setName("<date>").setResultsName(
                "date2"),
            # duration
            pyparsing.And([
                pyparsing.CaselessKeyword("duration"),
                __get_date_common_parser_part().setResultsName("duration"),
            ]),
        ]),
    ])
    date_inrange_expr.setParseAction(__build_date_inrange_expr)

    datespec_expr = pyparsing.And([
        pyparsing.CaselessKeyword("date-spec"),
        __get_date_common_parser_part().setResultsName("datespec"),
    ])
    datespec_expr.setParseAction(__build_datespec_expr)

    rsc_expr = pyparsing.And([
        pyparsing.CaselessKeyword("resource"),
        # resource name
        # Up to three parts separated by ":". The parts can contain any
        # characters except whitespace (token separator), ":" (parts
        # separator) and "()" (brackets).
        pyparsing.Regex(
            r"(?P<standard>[^\s:()]+)?:(?P<provider>[^\s:()]+)?:(?P<type>[^\s:()]+)?"
        ).setName("<resource name>"),
    ])
    rsc_expr.setParseAction(__build_rsc_expr)

    op_interval = pyparsing.And([
        pyparsing.CaselessKeyword("interval"),
        # no spaces allowed around the "="
        pyparsing.Literal("=").leaveWhitespace(),
        # interval value: number followed by a time unit, no spaces allowed
        # between the number and the unit thanks to Combine being used
        pyparsing.Combine(
            pyparsing.And([
                pyparsing.Word(pyparsing.nums),
                pyparsing.Optional(pyparsing.Word(pyparsing.alphas)),
            ])
        ).setName("<integer>[<time unit>]").setResultsName("interval_value"),
    ])
    op_expr = pyparsing.And([
        pyparsing.CaselessKeyword("op"),
        # operation name
        # It can be any string containing any characters except whitespace
        # (token separator) and "()" (brackets). Operations are defined in
        # agents' metadata which we do not have access to (e.g. when the
        # user sets operation "my_check" and doesn't even specify agent's
        # name).
        pyparsing.Regex(r"[^\s()]+").setName("<operation name>"
                                             ).setResultsName("name"),
        pyparsing.Optional(op_interval).setResultsName("interval"),
    ])
    op_expr.setParseAction(__build_op_expr)

    # Ordering matters here as the first expression which matches wins. This is
    # mostly not an issue as the expressions don't overlap and the grammar is
    # not ambiguous. There are exceptions, however:
    # 1) date gt something
    #   This can be either a date_unary_expr or a node_attr_binary_expr. We
    #   want it to be a date expression. If the user wants it to be a node
    #   attribute expression, they can do it like this: 'date gt <type>
    #   something' where <type> is an item of _token_to_node_expr_type. That
    #   way, both date and node attribute expression can be realized.
    simple_expr = pyparsing.Or([
        date_unary_expr,
        date_inrange_expr,
        datespec_expr,
        node_attr_unary_expr,
        node_attr_binary_expr,
        rsc_expr,
        op_expr,
    ])

    # See pyparsing examples
    # https://github.com/pyparsing/pyparsing/blob/master/examples/simpleBool.py
    # https://github.com/pyparsing/pyparsing/blob/master/examples/eval_arith.py
    bool_operator = pyparsing.Or(
        [pyparsing.CaselessKeyword("and"),
         pyparsing.CaselessKeyword("or")])
    bool_expr = pyparsing.infixNotation(
        simple_expr,
        # By putting both "and" and "or" in one tuple we say they have the same
        # priority. This is consistent with legacy pcs parsers. And it is how
        # it should be, they work as a glue between "simple_expr"s.
        [(bool_operator, 2, pyparsing.opAssoc.LEFT, __build_bool_tree)],
    )

    return pyparsing.Or([bool_expr, simple_expr])
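The FollowedBy workaround from date_inrange_expr, reduced to a runnable fragment: the date regex would happily match the keyword 'to', so the optional first date is only accepted when an actual 'to' still follows it:

import pyparsing as pp

date_in_range = pp.CaselessKeyword('date') + pp.CaselessKeyword('in_range') + \
    pp.Optional(pp.Regex(r'[^\s()]+')('date1') +
                pp.FollowedBy(pp.CaselessKeyword('to'))) + \
    pp.CaselessKeyword('to') + pp.Regex(r'[^\s()]+')('date2')

print(date_in_range.parseString('date in_range 2023-01-01 to 2023-12-31')['date1'])
# 2023-01-01
print(date_in_range.parseString('date in_range to 2023-12-31').asList())
# ['date', 'in_range', 'to', '2023-12-31']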
Example #18
# `value`, `boolean` and the `column_definition_`/`column_definitions_`
# pp.Forward() placeholders are defined earlier in the same module.
required = value
requireds = pp.delimitedList(required).setResultsName("required")

token_ = ~boolean + pp.Combine(
    pp.Word(pp.alphas + "_") + pp.Optional(pp.Word(pp.alphanums + "_")))

key = token_.copy()
is_ = pp.Suppress(pp.Literal("="))

key_value_pair = pp.Group(key + is_ + value).setParseAction(
    lambda tokens: KeyValuePair(*tokens[0]))

token = token_.copy().setParseAction(lambda tokens: Token(tokens[0]))

optional = key_value_pair | token
optionals = pp.delimitedList(optional).setResultsName("optional")

options = (requireds + pp.Suppress(",") + optionals) | requireds | optionals

identifier = pp.Word(pp.alphas, max=1).setResultsName(
    "identifier") + ~pp.FollowedBy(pp.Literal("=") | pp.Word(pp.alphas))

column_definition = pp.Group(identifier + pp.Optional(
    pp.Suppress(pp.Literal("[")) + options + pp.Suppress(pp.Literal("]"))))
column_definition_ << column_definition
column_definitions = pp.delimitedList(column_definition)
column_definitions_ << column_definitions

if __name__ == "__main__":
    pass
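The identifier definition above is the notable FollowedBy use here: a column identifier is a single letter that must not be glued to an '=' or to further letters. A quick check:

import pyparsing as pp

identifier = pp.Word(pp.alphas, max=1).setResultsName('identifier') + \
    ~pp.FollowedBy(pp.Literal('=') | pp.Word(pp.alphas))

print(identifier.parseString('l [k=v]')['identifier'])  # l
try:
    identifier.parseString('lt')
except pp.ParseException:
    print('rejected: not a single-letter identifier')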
Example #19
def _parse_ios_interfaces(data,
                          acls_as_list=True,
                          auto_cleanup=True,
                          skip_disabled=True):
    """
    Walks through an IOS interface config and returns a dict of parts.

    Intended for use by `~trigger.cmds.NetACLInfo.ios_parse()` but was written
    to be portable.

    :param acls_as_list:
        Whether you want acl names as strings instead of list members, e.g.
        "ABC123" vs. ['ABC123']. (Default: ``True``)

    :param auto_cleanup:
        Whether you want to pass results through cleanup_results().
        (Default: ``True``)

    :param skip_disabled:
        Whether to skip disabled interfaces. (Default: ``True``)
    """
    import pyparsing as pp

    # Setup
    bang = pp.Literal("!").suppress()
    anychar = pp.Word(pp.printables)
    nonbang = pp.Word(''.join([x for x in pp.printables if x != "!"]) +
                      '\n\r\t ')
    comment = bang + pp.restOfLine.suppress()

    #weird things to ignore in foundries
    aaa_line = pp.Literal("aaa").suppress() + pp.restOfLine.suppress()
    module_line = pp.Literal("module").suppress() + pp.restOfLine.suppress()
    startup_line = pp.Literal("Startup").suppress() + pp.restOfLine.suppress()
    ver_line = pp.Literal("ver") + anychar  #+ pp.restOfLine.suppress()
    # using SkipTo instead now

    #foundry example:
    #telnet@olse1-dc5#show  configuration | include ^(interface | ip address | ip access-group | description|!)
    #!
    #Startup-config data location is flash memory
    #!
    #Startup configuration:
    #!
    #ver 07.5.05hT53
    #!
    #module 1 bi-0-port-m4-management-module
    #module 2 bi-8-port-gig-module

    #there is a lot more that foundry is including in the output that should be ignored

    interface_keyword = pp.Keyword("interface")
    unwanted = pp.SkipTo(interface_keyword, include=False).suppress()

    #unwanted = pp.ZeroOrMore(bang ^ comment ^ aaa_line ^ module_line ^ startup_line ^ ver_line)

    octet = pp.Word(pp.nums, max=3)
    ipaddr = pp.Combine(octet + "." + octet + "." + octet + "." + octet)
    address = ipaddr
    netmask = ipaddr
    cidr = pp.Literal("/").suppress() + pp.Word(pp.nums, max=2)

    # Description
    desc_keyword = pp.Keyword("description")
    description = pp.Dict(pp.Group(desc_keyword + pp.Group(pp.restOfLine)))

    # Addresses
    #cisco example:
    # ip address 172.29.188.27 255.255.255.224 secondary
    #
    #foundry example:
    # ip address 10.62.161.187/26

    ipaddr_keyword = pp.Keyword("ip address").suppress()
    secondary = pp.Literal("secondary").suppress()

    #foundry matches on cidr and cisco matches on netmask
    #netmask converted to cidr in cleanup
    ip_tuple = pp.Group(address + (cidr ^ netmask)).setResultsName(
        'addr', listAllMatches=True)
    negotiated = pp.Literal('negotiated')  # Seen on Cisco 886
    ip_address = ipaddr_keyword + (negotiated
                                   ^ ip_tuple) + pp.Optional(secondary)

    addrs = pp.ZeroOrMore(ip_address)

    # ACLs
    acl_keyword = pp.Keyword("ip access-group").suppress()

    # acl_name will be [''] or '' depending on acls_as_list
    acl_name = pp.Group(anychar) if acls_as_list else anychar
    direction = pp.oneOf('in out').suppress()
    acl_in = acl_keyword + pp.FollowedBy(acl_name + pp.Literal('in'))
    acl_in.setParseAction(pp.replaceWith('acl_in'))
    acl_out = acl_keyword + pp.FollowedBy(acl_name + pp.Literal('out'))
    acl_out.setParseAction(pp.replaceWith('acl_out'))

    acl = pp.Dict(pp.Group((acl_in ^ acl_out) + acl_name)) + direction
    acls = pp.ZeroOrMore(acl)

    # Interfaces
    iface_keyword = pp.Keyword("interface").suppress()
    foundry_awesome = pp.Literal(" ").suppress() + anychar
    # foundry example:
    #!
    #interface ethernet 6/6
    # ip access-group 126 in
    # ip address 172.18.48.187 255.255.255.255

    #cisco example:
    #!
    #interface Port-channel1
    # description gear1-mtc : AE1 : iwslbfa1-mtc-sw0 :  : 1x1000 : 172.20.166.0/24 :  :  :
    # ip address 172.20.166.251 255.255.255.0

    interface = pp.Combine(anychar + pp.Optional(foundry_awesome))

    iface_body = pp.Optional(description) + pp.Optional(acls) + pp.Optional(
        addrs) + pp.Optional(acls)
    #foundry's body is acl then ip and cisco's is ip then acl

    iface_info = pp.Optional(unwanted) + iface_keyword + pp.Dict(
        pp.Group(interface + iface_body)) + pp.Optional(pp.SkipTo(bang))

    interfaces = pp.Dict(pp.ZeroOrMore(iface_info))

    # This is where the parsing is actually happening
    try:
        results = interfaces.parseString(data)
    except (pp.ParseException, pp.ParseFatalException, pp.RecursiveGrammarException):
        results = {}

    if auto_cleanup:
        return _cleanup_interface_results(results, skip_disabled=skip_disabled)
    return results
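A small round trip on a Cisco-style snippet (my approximation of typical output); auto_cleanup=False keeps the raw pyparsing results so nothing beyond the function above is needed:

sample = """!
interface FastEthernet0/0
 description uplink to core
 ip address 10.0.0.1 255.255.255.0
 ip access-group 101 in
!
"""
parsed = _parse_ios_interfaces(sample, auto_cleanup=False)
print(list(parsed.keys()))  # ['FastEthernet0/0']

pp.FollowedBy peeks past the acl name to the direction keyword, so the line is tagged acl_in or acl_out before the name itself is consumed.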
Example #20
    def _generate_grammar(self):
        # Define grammar:
        pp.ParserElement.setDefaultWhitespaceChars(" \t")

        def add_element(name: str, value: pp.ParserElement):
            if self.debug:
                value.setName(name)
                value.setDebug()
            return value

        EOL = add_element("EOL", pp.Suppress(pp.LineEnd()))
        Else = add_element("Else", pp.Keyword("else"))
        Identifier = add_element(
            "Identifier",
            pp.Word(f"{pp.alphas}_", bodyChars=pp.alphanums + "_-./"))
        BracedValue = add_element(
            "BracedValue",
            pp.nestedExpr(ignoreExpr=pp.quotedString
                          | pp.QuotedString(quoteChar="$(",
                                            endQuoteChar=")",
                                            escQuote="\\",
                                            unquoteResults=False)).
            setParseAction(lambda s, l, t: ["(", *t[0], ")"]),
        )

        Substitution = add_element(
            "Substitution",
            pp.Combine(
                pp.Literal("$") +
                (((pp.Literal("$") + Identifier + pp.Optional(pp.nestedExpr()))
                  | (pp.Literal("(") + Identifier + pp.Literal(")"))
                  | (pp.Literal("{") + Identifier + pp.Literal("}"))
                  | (pp.Literal("$") + pp.Literal("{") + Identifier +
                     pp.Optional(pp.nestedExpr()) + pp.Literal("}"))
                  | (pp.Literal("$") + pp.Literal("[") + Identifier +
                     pp.Literal("]"))))),
        )
        LiteralValuePart = add_element(
            "LiteralValuePart", pp.Word(pp.printables, excludeChars="$#{}()"))
        SubstitutionValue = add_element(
            "SubstitutionValue",
            pp.Combine(
                pp.OneOrMore(Substitution | LiteralValuePart
                             | pp.Literal("$"))),
        )
        FunctionValue = add_element(
            "FunctionValue",
            pp.Group(
                pp.Suppress(pp.Literal("$") + pp.Literal("$")) + Identifier +
                pp.nestedExpr(
                )  # .setParseAction(lambda s, l, t: ['(', *t[0], ')'])
            ).setParseAction(lambda s, l, t: handle_function_value(*t)),
        )
        Value = add_element(
            "Value",
            pp.NotAny(Else | pp.Literal("}") | EOL) +
            (pp.QuotedString(quoteChar='"', escChar="\\")
             | FunctionValue
             | SubstitutionValue
             | BracedValue),
        )

        Values = add_element("Values", pp.ZeroOrMore(Value)("value"))

        Op = add_element(
            "OP",
            pp.Literal("=")
            | pp.Literal("-=")
            | pp.Literal("+=")
            | pp.Literal("*=")
            | pp.Literal("~="),
        )

        Key = add_element("Key", Identifier)

        Operation = add_element(
            "Operation",
            Key("key") + pp.locatedExpr(Op)("operation") + Values("value"))
        CallArgs = add_element("CallArgs", pp.nestedExpr())

        def parse_call_args(results):
            out = ""
            for item in chain(*results):
                if isinstance(item, str):
                    out += item
                else:
                    out += "(" + parse_call_args(item) + ")"
            return out

        CallArgs.setParseAction(parse_call_args)

        Load = add_element("Load", pp.Keyword("load") + CallArgs("loaded"))
        Include = add_element(
            "Include",
            pp.Keyword("include") + pp.locatedExpr(CallArgs)("included"))
        Option = add_element("Option",
                             pp.Keyword("option") + CallArgs("option"))
        RequiresCondition = add_element("RequiresCondition",
                                        pp.originalTextFor(pp.nestedExpr()))

        def parse_requires_condition(s, l_unused, t):
            # The following expression unwraps the condition via the additional info
            # set by originalTextFor.
            condition_without_parentheses = s[t._original_start +
                                              1:t._original_end - 1]

            # And this replaces the colons with '&&' similar to how it's done for 'Condition'.
            condition_without_parentheses = (
                condition_without_parentheses.strip().replace(
                    ":", " && ").strip(" && "))
            return condition_without_parentheses

        RequiresCondition.setParseAction(parse_requires_condition)
        Requires = add_element(
            "Requires",
            pp.Keyword("requires") +
            RequiresCondition("project_required_condition"))

        FunctionArgumentsAsString = add_element(
            "FunctionArgumentsAsString", pp.originalTextFor(pp.nestedExpr()))
        QtNoMakeTools = add_element(
            "QtNoMakeTools",
            pp.Keyword("qtNomakeTools") +
            FunctionArgumentsAsString("qt_no_make_tools_arguments"),
        )

        # ignore the whole thing...
        DefineTestDefinition = add_element(
            "DefineTestDefinition",
            pp.Suppress(
                pp.Keyword("defineTest") + CallArgs +
                pp.nestedExpr(opener="{", closer="}", ignoreExpr=pp.LineEnd())
            ),
        )

        # ignore the whole thing...
        ForLoop = add_element(
            "ForLoop",
            pp.Suppress(
                pp.Keyword("for") + CallArgs +
                pp.nestedExpr(opener="{", closer="}", ignoreExpr=pp.LineEnd())
            ),
        )

        # ignore the whole thing...
        ForLoopSingleLine = add_element(
            "ForLoopSingleLine",
            pp.Suppress(
                pp.Keyword("for") + CallArgs + pp.Literal(":") +
                pp.SkipTo(EOL)),
        )

        # ignore the whole thing...
        FunctionCall = add_element("FunctionCall",
                                   pp.Suppress(Identifier + pp.nestedExpr()))

        Scope = add_element("Scope", pp.Forward())

        Statement = add_element(
            "Statement",
            pp.Group(Load
                     | Include
                     | Option
                     | Requires
                     | QtNoMakeTools
                     | ForLoop
                     | ForLoopSingleLine
                     | DefineTestDefinition
                     | FunctionCall
                     | Operation),
        )
        StatementLine = add_element("StatementLine",
                                    Statement + (EOL | pp.FollowedBy("}")))
        StatementGroup = add_element(
            "StatementGroup",
            pp.ZeroOrMore(StatementLine | Scope | pp.Suppress(EOL)))

        Block = add_element(
            "Block",
            pp.Suppress("{") + pp.Optional(EOL) + StatementGroup +
            pp.Optional(EOL) + pp.Suppress("}") + pp.Optional(EOL),
        )

        ConditionEnd = add_element(
            "ConditionEnd",
            pp.FollowedBy(
                (pp.Optional(pp.White()) +
                 (pp.Literal(":") | pp.Literal("{") | pp.Literal("|")))),
        )

        ConditionPart1 = add_element(
            "ConditionPart1",
            (pp.Optional("!") + Identifier + pp.Optional(BracedValue)))
        ConditionPart2 = add_element("ConditionPart2",
                                     pp.CharsNotIn("#{}|:=\\\n"))
        ConditionPart = add_element(
            "ConditionPart", (ConditionPart1 ^ ConditionPart2) + ConditionEnd)

        ConditionOp = add_element("ConditionOp",
                                  pp.Literal("|") ^ pp.Literal(":"))
        ConditionWhiteSpace = add_element(
            "ConditionWhiteSpace", pp.Suppress(pp.Optional(pp.White(" "))))

        ConditionRepeated = add_element(
            "ConditionRepeated",
            pp.ZeroOrMore(ConditionOp + ConditionWhiteSpace + ConditionPart))

        Condition = add_element("Condition",
                                pp.Combine(ConditionPart + ConditionRepeated))
        Condition.setParseAction(
            lambda x: " ".join(x).strip().replace(":", " && ").strip(" && "))

        # Weird thing like write_file(a)|error() where error() is the alternative condition
        # which happens to be a function call. In this case there is no scope, but our code expects
        # a scope with a list of statements, so create a fake empty statement.
        ConditionEndingInFunctionCall = add_element(
            "ConditionEndingInFunctionCall",
            pp.Suppress(ConditionOp) + FunctionCall +
            pp.Empty().setParseAction(lambda x: [[]]).setResultsName(
                "statements"),
        )

        SingleLineScope = add_element(
            "SingleLineScope",
            pp.Suppress(pp.Literal(":")) +
            pp.Group(Block | (Statement + EOL))("statements"),
        )
        MultiLineScope = add_element("MultiLineScope", Block("statements"))

        SingleLineElse = add_element(
            "SingleLineElse",
            pp.Suppress(pp.Literal(":")) + (Scope | Block |
                                            (Statement + pp.Optional(EOL))),
        )
        MultiLineElse = add_element("MultiLineElse", Block)
        ElseBranch = add_element(
            "ElseBranch",
            pp.Suppress(Else) + (SingleLineElse | MultiLineElse))

        # Scope is already add_element'ed in the forward declaration above.
        Scope <<= pp.Group(
            Condition("condition") + (SingleLineScope | MultiLineScope
                                      | ConditionEndingInFunctionCall) +
            pp.Optional(ElseBranch)("else_statements"))

        Grammar = StatementGroup("statements")
        Grammar.ignore(pp.pythonStyleComment())

        return Grammar
Example #21
def parse_file(filename, ckt):
    """
    Parse statements contained in the file pointed by filename and add
    to ckt. For now the parser is case-sensitive except for the dotted
    keywords (.model, etc.).
    """
    # *******************************************************************
    # Define some grammar
    # *******************************************************************
    # Defined here just in case (because the function must be re-entrant)

    # Symbol definitions
    LPAR, RPAR, LBRACK, RBRACK, EQ, COLON, QUOTE = map(pp.Suppress, "()[]=:'")

    # ParString used for string parameter values
    parString = pp.Word(pp.alphanums + allowedChars) \
        | QUOTE + pp.Word(pp.alphanums + allowedChars + ' ()') + QUOTE

    vector = LBRACK + pp.delimitedList(parString) + RBRACK

    parName = pp.Word(pp.alphas, pp.alphanums + '_')
    parameters = pp.OneOrMore(
        pp.Group(parName + EQ +
                 (parString('single') | pp.Group(vector)('vector'))))

    # Used for names of: devices (type and instances), nodes, subcircuits
    identifier = pp.Word(pp.alphanums + '_-')

    elemname = identifier('devType') + pp.Suppress(':') \
        + identifier('instanceName')

    nodes = pp.OneOrMore(identifier + ~pp.FollowedBy("="))

    # Output variables (for .plot, .save)
    intTerm = pp.Group(
        pp.Combine(identifier + pp.Literal(':') + identifier) + COLON +
        identifier)
    oneVar = intTerm | identifier
    outVars = pp.OneOrMore(oneVar)
    # Comment line: any line that starts with # , * or //
    commentline = pp.Suppress((
        (pp.Literal('*') ^ pp.Literal('#')) + pp.Regex('.*'))
                              ^ pp.dblSlashComment)

    # example: diode:d1 1 gnd model=mydiode isat= 2e-15 area = 2.
    elemline = elemname + nodes('nodes') + pp.Optional(
        parameters('parameters'))

    # example: .model mydiode diode ( isat=1e-15 cj0=5e-12 )
    modelline = pp.Suppress(pp.Keyword('.model', caseless=True)) \
        + identifier('modName') \
        + identifier('devType') \
        + pp.Optional(LPAR + parameters('parameters') + RPAR )

    # example: .options abstol=1e-8
    optionsline = pp.Suppress(pp.Keyword('.options', caseless=True)) \
        + parameters('options')

    # example: .vars freq=1GHz
    varsline = pp.Suppress(pp.Keyword('.vars', caseless=True)) \
        + parameters('vars')

    # example: .subckt LM741 in out vdd vee
    subcktDefLine = pp.Suppress(pp.Keyword('.subckt', caseless=True)) \
        + identifier('subName') + nodes('nodes')

    # example: xamp1 2 5 1 3 LM741
    # Treat the last node as the subcircuit definition name. Sorry
    subcktInstLine = pp.Word('xX', pp.alphanums + '_')('instanceName') \
        + nodes('nodes')

    # example: .include model.net
    fileString = pp.Word(pp.alphanums + '._-+')
    includeline = pp.Suppress(pp.Keyword('.include', caseless=True)) \
        + fileString('filename')

    # example: .analysis op
    analysisline = pp.Suppress(pp.Keyword('.analysis', caseless=True)) \
        + identifier('anType') + pp.Optional(parameters('parameters'))

    # example: .plot dc 10 out1
    plotline = pp.Suppress(pp.Keyword('.plot', caseless=True)) \
        + identifier('Type') + outVars('Vars')

    # example: .save dc 10 out1
    saveline = pp.Suppress(pp.Keyword('.save', caseless=True)) \
        + identifier('Type') + outVars('Vars')

    endsline = pp.Keyword('.ends', caseless=True)

    endline = pp.Keyword('.end', caseless=True)

    netlistLine = commentline \
        | elemline.setParseAction(parse_element) \
        | modelline.setParseAction(parse_model) \
        | optionsline.setParseAction(parse_options) \
        | varsline.setParseAction(parse_vars) \
        | subcktDefLine.setParseAction(parse_subcktDef) \
        | subcktInstLine.setParseAction(parse_subcktInst) \
        | includeline.setParseAction(parse_include) \
        | analysisline.setParseAction(parse_analysis) \
        | plotline.setParseAction(parse_plot) \
        | saveline.setParseAction(parse_save) \
        | endsline.setParseAction(parse_ends) \
        | endline.setParseAction(parse_end)

    # Each time this function is called it puts the working ckt in the
    # stack and takes it out when finished. So at any time we can
    # access the current circuit as cktStack[-1]
    cktStack.append(ckt)
    # Clean queue
    analysisQueue.reset()
    # Save file name in circuit
    ckt.filename = filename

    try:
        with open(filename, 'r') as f:
            lineNumber = 0
            lineAcc = ''

            for line in f:
                lineNumber += 1
                # Strip leading/trailing whitespace from the line
                line = line.strip()
                # Prepend lineAcc value (in case previous line ended with '\')
                if lineAcc:
                    line = lineAcc + ' ' + line

                if not line:
                    # nothing left to parse in this line, go to the next
                    continue

                # First line is the main circuit title (to follow
                # spice tradition)
                if (ckt.name == 'main') and (lineNumber == 1) \
                        and (len(cktStack) == 1):
                    ckt.title = line
                    continue

                # Join consecutive lines if line ends with '\'
                if line[-1] == '\\':
                    # remove backslash
                    line = line[:-1]
                    # must read next line before continuing
                    lineAcc = line
                    continue
                else:
                    # Reset the accumulator
                    lineAcc = ''

                # Most of the work is done here
                try:
                    result = netlistLine.parseString(line, parseAll=True)
                except (ParseError, NetVarException) as pe:
                    raise ParseError('Parse error in file ' + filename +
                                     ', line ' + str(lineNumber) + ':\n' +
                                     str(pe) + '\n"' + line + '"')
                except cir.CircuitError as ce:
                    raise ParseError('Circuit error in file ' + filename +
                                     ', line ' + str(lineNumber) + ':\n' +
                                     str(ce) + '\n"' + line + '"')
                except pp.ParseException as pe:
                    mesg = 'Syntax error in file ' + filename
                    mesg += ', line ' + str(lineNumber) + '\n'
                    mesg += '"' + line + '"'
                    if len(line) < 80:
                        mesg += '\n' + pe.col * '-' + '^'
                    raise ParseError(mesg)

                if not cktStack:
                    # This means that .end was found and processing must end
                    # immediately
                    break

            # Pop the working circuit (the stack may already be empty if .end was found)
            if cktStack:
                cktStack.pop()

    except IOError as ioe:
        raise ParseError('Parse error -> ' + str(ioe))

    return analysisQueue.q
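
A minimal, runnable sketch of the element-line grammar above (EQ, parName and
the parameter values are simplified stand-ins here; the original's parse
actions, vector values and circuit stack are omitted):

import pyparsing as pp

EQ = pp.Suppress('=')
parName = pp.Word(pp.alphas, pp.alphanums + '_')
parValue = pp.Word(pp.alphanums + '+-.')  # simplified: no vectors, no quoting
parameters = pp.OneOrMore(pp.Group(parName + EQ + parValue))

identifier = pp.Word(pp.alphanums + '_-')
elemname = identifier('devType') + pp.Suppress(':') + identifier('instanceName')
# a node is any identifier NOT followed by '=' (that would be a parameter)
nodes = pp.OneOrMore(identifier + ~pp.FollowedBy('='))
elemline = elemname + nodes('nodes') + pp.Optional(parameters('parameters'))

r = elemline.parseString('diode:d1 1 gnd isat=2e-15 area=2.')
print(r.devType, r.instanceName, list(r.nodes))  # diode d1 ['1', 'gnd']
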
Esempio n. 22
0
File: parser.py Project: WIEQLI/xmd
    pp.Suppress(pp.Word("'") | pp.Word('"')) + pp.Word(
        "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ!#$%&()*+,-./:;<=>?@[\]^_`{|}~ "
    ) + pp.Suppress(pp.Word("'") | pp.Word('"')))

arg = pp.Group(pp.Word(pp.alphanums + '"\'/._-')).setParseAction(createArg)

kwarg = pp.Group(variable_name + pp.Suppress('=') +
                 pp.Word(pp.alphas)).setParseAction(createKwarg)

command = (variable_name + pp.Group(
    pp.Optional(pp.originalTextFor(pp.nestedExpr(
        opener='(', closer=')'))))).setParseAction(nodes.CommandArguments)

Command = (
    pp.nestedExpr(opener='[>', closer=']', content=command) +
    pp.FollowedBy('{') +
    pp.originalTextFor(pp.nestedExpr(
        opener='{', closer='}')).setParseAction(recurse)).setParseAction(
            nodes.chooseCommand)

injector = pp.Group(pp.Suppress('{{') + pp.Word(pp.alphas) + pp.Suppress('}}'))

unicodePrintables = u''.join(
    unichr(c) for c in xrange(sys.maxunicode)
    if not (unichr(c).isspace() or unichr(c) == '[' or unichr(c) == ']'))

chars = ("0123456789"
         "abcdefghijklmnopqrstuvwxyz"
         "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
         "!\"#$%&'(){}*+,-./:;<=>?@\^_`|~ \r\n" + unicodePrintables)
Esempio n. 23
0
    def _add_extensions_to_entity_results(self, analysis) -> None:
        LOGGER.debug(f'adding swift extensions to entity results...')
        entity_results: Dict[str, AbstractEntityResult] = {
            k: v
            for (k, v) in self.results.items()
            if v.analysis is analysis and isinstance(v, AbstractEntityResult)
        }
        file_results: Dict[str, AbstractFileResult] = {
            k: v
            for (k, v) in self.results.items()
            if v.analysis is analysis and isinstance(v, AbstractFileResult)
        }

        entity_names: List[str] = [
            v.entity_name for _, v in entity_results.items()
        ]

        result: FileResult
        for _, result in file_results.items():
            entity_keywords: List[str] = [SwiftParsingKeyword.EXTENSION.value]
            entity_name_of_extension = pp.Word(pp.alphanums)

            match_expression = pp.Keyword(SwiftParsingKeyword.EXTENSION.value) + \
                entity_name_of_extension.setResultsName(CoreParsingKeyword.ENTITY_NAME.value) + pp.SkipTo(pp.FollowedBy(SwiftParsingKeyword.OPEN_SCOPE.value))

            comment_keywords: Dict[str, str] = {
                CoreParsingKeyword.LINE_COMMENT.value:
                SwiftParsingKeyword.INLINE_COMMENT.value,
                CoreParsingKeyword.START_BLOCK_COMMENT.value:
                SwiftParsingKeyword.START_BLOCK_COMMENT.value,
                CoreParsingKeyword.STOP_BLOCK_COMMENT.value:
                SwiftParsingKeyword.STOP_BLOCK_COMMENT.value
            }
            extension_entity_results: List[
                AbstractEntityResult] = result.generate_entity_results_from_scopes(
                    entity_keywords, match_expression, comment_keywords)

            for extension in extension_entity_results:
                if extension.entity_name in entity_names:
                    entity_result: AbstractEntityResult = analysis.result_by_entity_name(
                        extension.entity_name)
                    if entity_result is not None:
                        entity_result.scanned_tokens.extend(
                            extension.scanned_tokens)
                        LOGGER.debug(
                            f'added extension from file result {result=} to entity result: {entity_result=}.'
                        )
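
A reduced, self-contained sketch of the extension-matching expression above,
with the SwiftParsingKeyword/CoreParsingKeyword enum values replaced by the
plain strings they presumably stand for ('extension', '{', 'entity_name'):

import pyparsing as pp

entity_name = pp.Word(pp.alphanums)
match_expression = (pp.Keyword('extension') +
                    entity_name('entity_name') +
                    pp.SkipTo(pp.FollowedBy('{')))

result = match_expression.parseString('extension String: Codable {')
print(result.entity_name)  # String
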
Esempio n. 24
0
    def __init__(self,
                 base_freq=440.0,
                 amplitude=.5,
                 max_gain=10.,
                 min_gain=-200.,
                 new_scale='C/a',
                 clef='violin'):

        # an important constant for the conversion of musical half-tone steps
        # to frequency values is the twelfth root of 2
        self.__root__ = 1.0594630943592952645618252949463  # (2 ** (1 / 12))

        # *** parser definitions ***
        # helper
        no_whites = pp.NotAny(pp.White())
        tok_end = (pp.StringEnd() | pp.LineEnd()).suppress()

        # numbers
        real = pp.Combine(
            pp.Word(pp.nums) +
            pp.Optional(pp.Char(',.') + pp.Word(pp.nums))).setParseAction(
                lambda t: float(t[0].replace(',', '.')))

        integer = (pp.Optional(pp.Literal('-')) +
                   pp.Word(pp.nums)).setParseAction(
                       lambda t: int(t[0] + t[1]) if len(t) > 1 else int(t[0]))

        # signs
        must_sign = pp.Char('+-').setParseAction(lambda t: float(t[0] + '1'))
        may_sign = pp.Optional(pp.Char('+-')).setParseAction(
            lambda t: float(t[0] + '1' if len(t) > 0 else '1'))

        # note value cents
        cent = (must_sign + no_whites +
                real).setParseAction(lambda t: t[0] * t[1] / 100)

        # helpers for the note name parser
        note_name_offset = {
            'C': -9,
            'D': -7,
            'E': -5,
            'F': -4,
            'G': -2,
            'A': 0,
            'B': 2,
        }
        note_name = pp.Char('CDEFGABcdefgab').setParseAction(
            lambda t: note_name_offset[t[0]
                                       if t[0] in 'CDEFGAB' else t[0].upper()])

        flat_sharp = pp.Char('#b').setParseAction(lambda t: 1
                                                  if t[0] == '#' else -1)
        octave = pp.Char('0123456789').setParseAction(lambda t:
                                                      (int(t[0]) - 4) * 12)
        full_note = (note_name + no_whites +
                     pp.Optional(pp.FollowedBy(flat_sharp) + flat_sharp) +
                     no_whites + pp.FollowedBy(octave) +
                     octave).setParseAction(lambda t: sum(t))

        self.note_name_parser = (
            full_note + pp.Optional(pp.White()).suppress() +
            pp.Optional(cent) + tok_end
        ).setParseAction(lambda t: float(sum(t))).setResultsName('note_value')

        # frequency parsers
        hertz = real + pp.Literal('Hz').suppress()

        self.frequency_parser = (hertz + tok_end).setParseAction(
            lambda t: float(t[0])).setResultsName('frequency')

        self.base_freq_parser = (
            full_note + pp.Literal('=').suppress() + hertz + tok_end
        ).setParseAction(lambda t: t[1] * (1.0594630943592952645618252949463**
                                           -t[0])).setResultsName('base_freq')

        # parses a string like "sc -7:b" into a musical half tone step (using the MusicConverter.set method)
        sign = (pp.Keyword('##') | pp.Keyword('bb') | pp.Keyword('#')
                | pp.Keyword('b') | pp.Keyword('n') | pp.Keyword('_'))
        self.score_parser = (integer + pp.Literal(':').suppress() + sign +
                             tok_end).setResultsName('notation')

        # amplitude parser
        self.amp_parser = (
            real + pp.Literal('%').suppress() + tok_end
        ).setParseAction(lambda t: float(t[0])).setResultsName('amplitude')

        self.gain_parser = (
            may_sign + real + pp.Literal('dB').suppress() + tok_end
        ).setParseAction(lambda t: float(t[0] * t[1])).setResultsName('gain')

        # clef parser
        self.clef_parser = (pp.Keyword('violin') | pp.Keyword('alto')
                            | pp.Keyword('bass')).setResultsName('clef')

        # key parser
        key_token = pp.NoMatch()
        for key in self.keys:
            key_token = key_token | pp.Keyword(key)

        self.key_parser = (key_token).setResultsName('key')

        # complete parser
        self.input_parser = self.note_name_parser | \
                            self.frequency_parser | \
                            self.base_freq_parser | \
                            self.amp_parser | \
                            self.gain_parser | \
                            self.clef_parser | \
                            self.key_parser | \
                            self.score_parser

        # *** initializations ***
        self.__note_value__ = 0.
        self.__base_freq__ = 440.
        self.base_freq = base_freq

        self.key = new_scale
        self.__names__ = 'C D EF G A B'
        self.clef = clef
        self.__clef__ = 'violin'

        self.max_gain = max_gain
        self.min_gain = min_gain
        self.amplitude = amplitude
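
A miniature of the note parser above: each sub-parser maps its token to a
half-tone offset and the outer parse action sums them (the offsets table is
copied from the snippet, the test input is illustrative):

import pyparsing as pp

offsets = {'C': -9, 'D': -7, 'E': -5, 'F': -4, 'G': -2, 'A': 0, 'B': 2}
note_name = pp.Char('CDEFGAB').setParseAction(lambda t: offsets[t[0]])
flat_sharp = pp.Char('#b').setParseAction(lambda t: 1 if t[0] == '#' else -1)
octave = pp.Char(pp.nums).setParseAction(lambda t: (int(t[0]) - 4) * 12)
full_note = (note_name + pp.Optional(flat_sharp) +
             octave).setParseAction(lambda t: sum(t))

halftones = full_note.parseString('A#4')[0]      # 1 half tone above A4
print(440.0 * (2 ** (1 / 12)) ** halftones)      # ~466.16 Hz
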
Esempio n. 25
0
FADE = pp.Literal('FADE')
WIPE = pp.Literal('WIPE')
MISC_TRANS = pp.oneOf(['LATER', 'SAME SCENE', 'BACK TO SCENE'])
FLASHBACK = pp.oneOf(['FLASHBACK', 'FLASHFORWARD'])
TRANSITIONS = pp.Combine(
    pp.Optional(CAPS) +
    pp.Or([CUT, DISSOLVE, FADE, WIPE, MISC_TRANS, FLASHBACK]) +
    pp.Optional(pp.Word(ALPHANUMS)) +
    pp.Optional(pp.Literal(':').suppress()),
    joinString=' ', adjacent=False).setResultsName('transition')

# Sound and Visual WORDS
# consider not using, because could be character's name or place?
# SVW = pp.oneOf('FLASH', "ROAR", 'CRACK', 'KNOCK', 'SMACK', 'THUMP', 'ROMP', 'SCREECH', 'PLOP', 'SPLASH', 'BEEP', 'BANG', 'SQUISH', 'FIZZ', 'OINK', 'TICK', 'TOCK', 'ZAP', 'VROOM', 'PING', 'HONK', 'FLUTTER', 'AWOOGA', 'OOM-PAH', 'CLANK', 'BAM', 'BOP')


#misc
mid_x = pp.Literal('mid').suppress() + pp.Word(pp.alphanums)
continuous_action = pp.Or([pp.Literal('CONTINUOUS ACTION'), pp.Literal('continuous action')])

enumerated_time_word = pp.oneOf([
    'sunrise', 'sunset', 'present', 'later', 'before', 'breakfast', 'lunch',
    'dinner', 'past', 'spring', 'summer', 'fall', 'winter', 'easter',
    'christmas', 'passover', 'eve', 'dusk', 'ramadan', 'birthday', 'purim',
    'holi', 'equinox', 'kwanzaa', 'recent', 'annual', 'sundown', 'sun-down',
    'sun-up', 'tonight', 'dawn']) + ~(~WH + pp.Word(pp.alphanums))

stop_words = ~pp.oneOf([
    'is', 'home', 'this', 'that', 'there', 'are', 'were', 'be', 'for',
    'with', 'was', 'won\'t', 'aren\'t', 'ain\'t', 'isn\'t', 'not', 'on',
    'above', 'into', 'around', 'over', 'in', 'number', 'another', 'third',
    'fourth', 'anything', 'hear', 'wife', 'run', 'me', 'case', 'everyone',
    'friends'])


def num_spaces(tokens):
	return len(tokens[0])

spaces = pp.OneOrMore(pp.White(ws=' ', min=1)).addParseAction(num_spaces).setResultsName('indent')
min_2_spaces = pp.OneOrMore(pp.White(ws=' ', min=2)).addParseAction(num_spaces).setResultsName('indent')
w = pp.OneOrMore(pp.White(ws='\t\r\n', min=1, max=0, exact=0))
wall = w + spaces


one_word_title = pp.Word(ALPHANUMS, max=1) & pp.FollowedBy(pp.Word(lower))
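
A small sketch of the indentation trick above: pp.White keeps the leading run
of spaces (instead of skipping it) and the parse action replaces the run with
its length:

import pyparsing as pp

spaces = pp.White(ws=' ', min=1).addParseAction(lambda t: len(t[0]))('indent')
line = spaces + pp.restOfLine('text')

result = line.parseString('    INT. HOUSE - NIGHT')
print(result.indent, result.text)  # 4 INT. HOUSE - NIGHT
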
Esempio n. 26
0
class grammar_def:

    # Forward declarations
    expression = pp.Forward()
    assignment_expression = pp.Forward()
    type_name = pp.Forward()
    initializer_list = pp.Forward()
    cast_expression = pp.Forward()

    # (6.5.2) argument-expression-list:
    argument_expression_list = (
        assignment_expression +
        pp.ZeroOrMore(token.punctuator.comma + assignment_expression))

    # (6.5.1) primary-expression:
    primary_expression = (token.identifier
                          | token.constant
                          | token.string_literal
                          | (token.punctuator.left_paren + expression +
                             token.punctuator.right_paren))
    # (6.5.2) postfix-expression:
    postfix_expression_1 = (token.punctuator.left_bracket + expression +
                            token.punctuator.right_bracket)
    postfix_expression_2 = (token.punctuator.left_paren +
                            argument_expression_list +
                            token.punctuator.right_paren)
    postfix_expression_3 = (
        (token.punctuator.dot | token.punctuator.arrow_op) + token.identifier)
    postfix_expression_4 = (postfix_expression_1
                            | postfix_expression_2
                            | postfix_expression_3
                            | token.punctuator.increment_op
                            | token.punctuator.decrement_op)
    # ( type-name ) { init-list } is parsed as part of cast-expression
    postfix_expression_5 = (token.punctuator.left_paren + type_name +
                            token.punctuator.right_paren +
                            token.punctuator.left_brace + initializer_list +
                            pp.Optional(token.punctuator.comma) +
                            token.punctuator.right_brace)
    postfix_expression = pp.Forward()
    postfix_expression <<= (
        (primary_expression + pp.ZeroOrMore(postfix_expression_4))
        | postfix_expression_5)
    # (6.5.3) unary-operator: one of
    unary_operator = (token.punctuator.ampersand
                      | token.punctuator.asterisk
                      | token.punctuator.plus
                      | token.punctuator.minus
                      | token.punctuator.bitwise_complement_op
                      | token.punctuator.logical_negation_op)
    # (6.5.3) unary-expression:
    unary_expression = pp.Forward()
    unary_expression_1 = (token.keyword.sizeof_ +
                          (unary_expression
                           | (token.punctuator.left_paren + type_name +
                              token.punctuator.right_paren)))
    unary_expression <<= (postfix_expression
                          | (token.punctuator.increment_op + unary_expression)
                          | (token.punctuator.decrement_op + unary_expression)
                          | (unary_operator + cast_expression)
                          | unary_expression_1)
    # (6.5.4) cast-expression:
    # "( identifier )" のケースのみgrammarが区別できない
    # ケース:postfix-expression: ( type-name ) { initializer-list }
    cast_expression_1 = (
        pp.FollowedBy(token.punctuator.left_paren + token.identifier +
                      token.punctuator.right_paren +
                      token.punctuator.left_brace) +
        token.punctuator.left_paren + token.identifier +
        token.punctuator.right_paren + token.punctuator.left_brace +
        initializer_list + pp.Optional(token.punctuator.comma) +
        token.punctuator.right_brace)
    cast_expression_2 = (
        pp.FollowedBy(token.punctuator.left_paren + token.identifier +
                      token.punctuator.right_paren + cast_expression) +
        token.punctuator.left_paren + token.identifier +
        token.punctuator.right_paren + cast_expression)
    cast_expression_3 = (
        pp.FollowedBy(token.punctuator.left_paren + type_name +
                      token.punctuator.right_paren + cast_expression) +
        token.punctuator.left_paren + type_name +
        token.punctuator.right_paren + cast_expression)
    cast_expression <<= (cast_expression_1
                         | cast_expression_2
                         | cast_expression_3
                         | unary_expression)
    # (6.5.5) multiplicative-expression:
    multiplicative_expression = pp.Forward()
    multiplicative_expression <<= (
        cast_expression +
        pp.Optional((token.punctuator.asterisk
                     | token.punctuator.div_op
                     | token.punctuator.remain_op) + cast_expression))
    # (6.5.6) additive-expression:
    additive_expression = pp.Forward()
    additive_expression <<= (
        multiplicative_expression +
        pp.Optional((token.punctuator.plus
                     | token.punctuator.minus) + multiplicative_expression))
    # (6.5.7) shift-expression:
    shift_expression = pp.Forward()
    shift_expression <<= (
        additive_expression +
        pp.Optional((token.punctuator.left_shift_op
                     | token.punctuator.right_shift_op) + additive_expression))
    # (6.5.8) relational-expression:
    relational_expression = pp.Forward()
    relational_expression <<= (
        shift_expression +
        pp.Optional((token.punctuator.lt_op
                     | token.punctuator.gt_op
                     | token.punctuator.lte_op
                     | token.punctuator.gte_op) + shift_expression))
    # (6.5.9) equality-expression:
    equality_expression = pp.Forward()
    equality_expression <<= (
        relational_expression +
        pp.Optional((token.punctuator.equal_op
                     | token.punctuator.inequal_op) + relational_expression))
    # (6.5.10) AND-expression:
    AND_expression = pp.Forward()
    AND_expression <<= (
        equality_expression +
        pp.Optional(token.punctuator.ampersand + equality_expression))
    # (6.5.11) exclusive-OR-expression:
    exclusive_OR_expression = pp.Forward()
    exclusive_OR_expression <<= (
        AND_expression +
        pp.Optional(token.punctuator.bitwise_EXOR_op + AND_expression))
    # (6.5.12) inclusive-OR-expression:
    inclusive_OR_expression = pp.Forward()
    inclusive_OR_expression <<= (
        exclusive_OR_expression +
        pp.Optional(token.punctuator.bitwise_OR_op + exclusive_OR_expression))
    # (6.5.13) logical-AND-expression:
    logical_AND_expression = pp.Forward()
    logical_AND_expression <<= (
        inclusive_OR_expression +
        pp.Optional(token.punctuator.logical_AND_op + inclusive_OR_expression))
    # (6.5.14) logical-OR-expression:
    logical_OR_expression = pp.Forward()
    logical_OR_expression <<= (
        logical_AND_expression +
        pp.Optional(token.punctuator.logical_OR_op + logical_AND_expression))
    # (6.5.15) conditional-expression:
    conditional_expression = pp.Forward()
    conditional_expression <<= (
        logical_OR_expression +
        pp.Optional(token.punctuator.conditional_op + expression +
                    token.punctuator.colon + conditional_expression))
    # (6.5.16) assignment-operator: one of
    assignment_operator = (token.punctuator.simple_assign_op
                           | token.punctuator.mul_assign_op
                           | token.punctuator.div_assign_op
                           | token.punctuator.remain_assign_op
                           | token.punctuator.add_assign_op
                           | token.punctuator.sub_assign_op
                           | token.punctuator.left_shift_assign_op
                           | token.punctuator.right_shift_assign_op
                           | token.punctuator.bitwise_AND_assign_op
                           | token.punctuator.bitwise_EXOR_assign_op
                           | token.punctuator.bitwise_OR_assign_op)
    # (6.5.16) assignment-expression:
    assignment_expression = pp.Forward()
    assignment_expression <<= (
        conditional_expression
        | (unary_expression + assignment_operator + assignment_expression))
    # (6.5.17) expression:
    expression <<= (
        assignment_expression +
        pp.Optional(token.punctuator.comma + assignment_expression))
    # (6.6) constant-expression:
    constant_expression = conditional_expression

    # (6.7.3) type-qualifier:
    type_qualifier = (token.keyword.const_
                      | token.keyword.restrict_
                      | token.keyword.volatile_)
    # (6.7.5) type-qualifier-list:
    type_qualifier_list = type_qualifier[1, ...]
    # (6.7.4) function-specifier:
    function_specifier = token.keyword.inline_

    # (6.7.5) pointer:
    pointer = pp.Group(
        (token.punctuator.asterisk + pp.Optional(type_qualifier_list))[1, ...])

    # (6.7.5) declarator:
    direct_declarator_base = pp.Optional(pointer) + token.identifier
    direct_declarator_postfix1 = (token.punctuator.left_bracket +
                                  pp.SkipTo(token.punctuator.right_bracket) +
                                  token.punctuator.right_bracket)
    direct_declarator_postfix2 = (token.punctuator.left_paren +
                                  pp.SkipTo(token.punctuator.right_paren) +
                                  token.punctuator.right_paren)
    declarator = pp.Forward()
    direct_declarator_1 = (token.identifier.copy())("id")
    direct_declarator_2 = (token.punctuator.left_paren + declarator +
                           token.punctuator.right_paren)
    direct_declarator = (
        direct_declarator_1
        | direct_declarator_2) + pp.ZeroOrMore(direct_declarator_postfix1
                                               | direct_declarator_postfix2)
    declarator <<= (pp.Optional(pointer))("pointer") + direct_declarator

    # (6.7.8) designator:
    """
	designator_1 = pp.Forward()
	designator_1 <<= (
		token.punctuator.left_bracket
		+ pp.SkipTo(pp.Word("[]"))
		+ pp.ZeroOrMore(
			designator_1
			+ pp.SkipTo(pp.Word("[]"))
		)
		+ token.punctuator.right_bracket
	)
	"""
    designator_1 = pp.nestedExpr("[", "]")
    designator = pp.Combine(designator_1
                            | (token.punctuator.dot + token.identifier))
    # (6.7.8) designator-list:
    designator_list = pp.OneOrMore(designator)
    # (6.7.8) designation:
    designation = designator_list + token.punctuator.simple_assign_op
    # (6.7.8) initializer:
    initializer_1 = (
        # stand-in for assignment-expression
        pp.SkipTo(token.punctuator.semicolon))
    initializer_2 = (token.punctuator.left_brace + initializer_list +
                     pp.Optional(token.punctuator.comma) +
                     token.punctuator.right_brace)
    initializer = (initializer_1 | initializer_2)
    # (6.7.8) initializer-list:
    initializer_list_1 = pp.Optional(designation) + initializer
    initializer_list <<= (
        initializer_list_1 +
        pp.ZeroOrMore(token.punctuator.comma + initializer_list_1))

    # (6.7) init-declarator:
    init_declarator = pp.Group(
        pp.Group(declarator.copy())("declarator") +
        pp.Optional(token.punctuator.simple_assign_op + initializer)("init"))
    # (6.7) init-declarator-list:
    #	init_declarator_list = (
    #		init_declarator
    #		+ pp.ZeroOrMore(
    #			token.punctuator.comma
    #			+ init_declarator
    #		)
    #	)("declarator_list")
    init_declarator_list = pp.Group(pp.delimitedList(init_declarator,
                                                     ","))("declarator_list")

    # (6.7.1) storage-class-specifier:
    storage_class_specifier = (
        #		token.keyword.typedef_.copy().setParseAction(ev_hdler.typedef_begin)
        token.keyword.typedef_("typedef")
        | token.keyword.extern_
        | token.keyword.static_
        | token.keyword.auto_
        | token.keyword.register_
        | token.keyword.near
        | token.keyword.far)
    storage_class_specifier_la = (token.keyword.typedef_
                                  | token.keyword.extern_
                                  | token.keyword.static_
                                  | token.keyword.auto_
                                  | token.keyword.register_
                                  | token.keyword.near
                                  | token.keyword.far)

    struct_or_union_specifier = pp.Forward()
    enum_specifier = pp.Forward()
    typedef_name = token.identifier

    # (6.7.2) type-specifier:
    type_specifier_int_pre = (token.keyword.signed_ | token.keyword.unsigned_)
    type_specifier_int = (
        token.keyword.char_
        | token.keyword.short_
        | token.keyword.int_
        | (token.keyword.long_ + pp.Optional(token.keyword.long_)))
    type_specifier = (
        token.keyword.void_
        | (pp.Optional(type_specifier_int_pre) + type_specifier_int)
        | token.keyword.float_
        | token.keyword.double_
        | token.keyword._Bool_
        | token.keyword._Complex_
        | struct_or_union_specifier
        | enum_specifier
        | typedef_name)
    type_specifier_root = (
        token.keyword.void_
        | (pp.Optional(type_specifier_int_pre) + type_specifier_int)
        | token.keyword.float_
        | token.keyword.double_
        | token.keyword._Bool_
        | token.keyword._Complex_
        | struct_or_union_specifier
        | enum_specifier
        | typedef_name)

    # (6.7.2.1) specifier-qualifier-list:
    # assume the type-specifier occurs exactly once
    specifier_qualifier_list = pp.Group(
        pp.ZeroOrMore(type_qualifier) + type_specifier.copy()('type_spec') +
        pp.ZeroOrMore(type_qualifier))('specifier_qualifier_list')

    # (6.7.2.1) struct-or-union:
    struct_or_union = (token.keyword.struct_ | token.keyword.union_)

    # (6.7.2.1) struct-declarator:
    struct_declarator_1 = (token.punctuator.colon + constant_expression.copy())
    struct_declarator_2 = (declarator.copy() +
                           pp.Optional(struct_declarator_1))
    struct_declarator = (struct_declarator_1 | struct_declarator_2)
    # (6.7.2.1) struct-declarator-list:
    struct_declarator_list = (
        struct_declarator +
        pp.Optional(token.punctuator.comma + struct_declarator))
    # (6.7.2.1) struct-declaration:
    struct_declaration = pp.Group(
        pp.ZeroOrMore(grammar_comment.any_comment_parser)("comment_pre") +
        specifier_qualifier_list + struct_declarator_list +
        token.punctuator.semicolon +
        pp.Optional(grammar_comment.one_line_comment_parser)("comment"))
    # (6.7.2.1) struct-declaration-list:
    struct_declaration_list = pp.OneOrMore(struct_declaration)
    # (6.7.2.1) struct-or-union-specifier:
    struct_or_union_specifier_1 = (
        token.punctuator.left_brace +
        pp.Optional(grammar_comment.one_line_comment_parser)("comment") +
        pp.Group(struct_declaration_list)("struct_decl_list") +
        token.punctuator.right_brace +
        pp.Optional(grammar_comment.one_line_comment_parser)("comment"))
    struct_or_union_specifier_2 = (
        token.identifier.copy()("struct_id") +
        pp.Optional(grammar_comment.one_line_comment_parser)("comment") +
        pp.Optional(struct_or_union_specifier_1))
    struct_or_union_specifier <<= pp.Group(
        struct_or_union("struct_or_union") +
        pp.Optional(grammar_comment.one_line_comment_parser)("comment") +
        (struct_or_union_specifier_1
         | struct_or_union_specifier_2))("struct_spec")

    # (6.7) declaration-specifiers:
    declaration_specifiers = (pp.ZeroOrMore(storage_class_specifier)
                              & pp.Optional(type_specifier)
                              & pp.ZeroOrMore(type_qualifier)
                              & pp.ZeroOrMore(function_specifier))
    declaration_specifiers_root = (pp.ZeroOrMore(storage_class_specifier)
                                   & pp.Optional(type_specifier_root)
                                   & pp.ZeroOrMore(type_qualifier)
                                   & pp.ZeroOrMore(function_specifier))
    # handler-free version for lookahead
    declaration_specifiers_la = (pp.ZeroOrMore(storage_class_specifier_la)
                                 & pp.Optional(type_specifier)
                                 & pp.ZeroOrMore(type_qualifier)
                                 & pp.ZeroOrMore(function_specifier))
    """
	declaration_specifiers = (
		pp.ZeroOrMore(
			storage_class_specifier
			| type_qualifier
			| function_specifier
		)
		+ type_specifier.copy().setParseAction(analyzer.declaration_type)
		+ pp.ZeroOrMore(
			storage_class_specifier
			| type_qualifier
			| function_specifier
		)
	)
	"""

    # (6.7.2.2) enumerator:
    enumerator = (
        token.constant_def.enumeration_constant.copy()("enum_id") +
        pp.Optional(token.punctuator.simple_assign_op
                    # parsing a full constant-expression recurses too deeply and dies
                    #			+ constant_expression
                    + pp.CharsNotIn(',}'))("enum_init"))
    # (6.7.2.2) enumerator-list:
    enumerator_list = (
        pp.ZeroOrMore(grammar_comment.any_comment_parser)("comment_pre") +
        enumerator +
        pp.ZeroOrMore(token.punctuator.comma +
                      pp.Optional(grammar_comment.one_line_comment_parser)
                      ("comment") +
                      pp.ZeroOrMore(grammar_comment.any_comment_parser)
                      ("comment_pre") + pp.Optional(enumerator)))
    # (6.7.2.2) enum-specifier:
    enum_specifier_1 = (
        token.punctuator.left_brace +
        pp.Optional(grammar_comment.one_line_comment_parser)("comment") +
        pp.Group(enumerator_list)("enum_list") + token.punctuator.right_brace +
        pp.Optional(grammar_comment.one_line_comment_parser)("comment"))
    enum_specifier_2 = (
        token.identifier.copy()("enum_id") +
        pp.Optional(grammar_comment.one_line_comment_parser)("comment") +
        pp.Optional(enum_specifier_1))
    enum_specifier <<= pp.Group(
        token.keyword.enum_("enum") +
        pp.Optional(grammar_comment.one_line_comment_parser)("comment") +
        (enum_specifier_2
         | enum_specifier_1))("enum_spec")

    # (6.7) declaration:
    # effectively unused
    # note that global declarations do not start at declaration but at external-declaration
    declaration = pp.Group(declaration_specifiers +
                           pp.Optional(init_declarator_list) +
                           token.punctuator.semicolon)

    abstract_declarator = pp.Forward()

    # (6.7.5) parameter-declaration:
    parameter_declaration = (declaration_specifiers +
                             (declarator
                              | pp.Optional(abstract_declarator)))
    # (6.7.5) parameter-list:
    parameter_list = (
        parameter_declaration +
        pp.Optional(token.punctuator.comma + parameter_declaration))
    # (6.7.5) parameter-type-list:
    parameter_type_list = (
        parameter_list +
        pp.Optional(token.punctuator.comma + token.punctuator.ellipsis))

    direct_abstract_declarator = pp.Forward()
    # (6.7.6) abstract-declarator:
    abstract_declarator_1 = (pointer + pp.Optional(direct_abstract_declarator))
    abstract_declarator_2 = (direct_abstract_declarator)
    abstract_declarator <<= (abstract_declarator_1 | abstract_declarator_2)
    # (6.7.6) direct-abstract-declarator:
    direct_abstract_declarator_1 = (token.punctuator.left_paren +
                                    abstract_declarator +
                                    token.punctuator.right_paren)
    direct_abstract_declarator_2 = (
        pp.Optional(direct_abstract_declarator) +
        (pp.nestedExpr("[", "]")
         | (token.punctuator.left_paren + pp.Optional(parameter_type_list) +
            token.punctuator.right_paren)))
    direct_abstract_declarator <<= (direct_abstract_declarator_1
                                    | direct_abstract_declarator_2)

    # (6.7.6) type-name:
    type_name <<= (specifier_qualifier_list + pp.Optional(abstract_declarator))

    # (6.9.1) declaration-list:
    declaration_list = pp.OneOrMore(declaration)

    # (6.9) external-declaration:
    # (6.9.1) function-definition:
    #     declaration-specifiers declarator declaration-listopt compound-statement
    # (6.7) declaration:
    #     declaration-specifiers init-declarator-listopt ;
    # look ahead from the declarator onward to decide the production
    # 1) "declarator =", "declarator ,", "declarator ;" or ";" alone is a declaration
    external_declaration_lookahead_1 = pp.FollowedBy(
        declaration_specifiers_la + ((declarator +
                                      (token.punctuator.simple_assign_op
                                       | token.punctuator.comma
                                       | token.punctuator.semicolon))
                                     | token.punctuator.semicolon))
    external_declaration_1 = (
        external_declaration_lookahead_1.ignore(grammar_comment.comment_parser)
        + pp.Group((declaration_specifiers_root("decl_spec") + pp.Optional(
            init_declarator_list)).ignore(grammar_comment.comment_parser) +
                   (token.punctuator.semicolon +
                    pp.Optional(grammar_comment.one_line_comment_parser)
                    ("comment")))("external_decl").setParseAction(
                        act_hdler.external_declaration))
    # 2) "declarator declaration", "declarator {" はfunction-definition
    external_declaration_lookahead_2 = pp.FollowedBy(
        declaration_specifiers_la + declarator +
        pp.Optional(grammar_comment.one_line_comment_parser)("comment") +
        (declaration
         | token.punctuator.left_brace))
    external_declaration_2 = (
        external_declaration_lookahead_2 + declaration_specifiers +
        pp.Optional(grammar_comment.one_line_comment_parser)("comment") +
        declarator + pp.Optional(declaration_list) +
        pp.Optional(grammar_comment.one_line_comment_parser)("comment") +
        pp.nestedExpr("{", "}"))
    external_declaration = (
        grammar_pp.parser
        | external_declaration_1
        | external_declaration_2.ignore(grammar_comment.comment_parser)
        | grammar_comment.any_comment_parser.copy().setParseAction(
            act_hdler.comment)
        # if nothing has matched up to here, read and discard the token
        | token.identifier.copy().setParseAction(parse_debug_unknown_token))
    translation_unit = pp.OneOrMore(external_declaration) + pp.Regex(
        r".*").setParseAction(parse_debug_unknown_token)
Esempio n. 27
0
def parse_hspice(netlist_string):
    # newlines are part of the grammar, so redefine the whitespace characters without them
    ws = ' \t'
    _p.ParserElement.setDefaultWhitespaceChars(ws)

    # hspice netlist grammar definition
    EOL = _p.LineEnd().suppress()  # end of line
    linebreak = _p.Suppress(_p.LineEnd() +
                            "+")  # line continuation: newline followed by '+'
    identifier = _p.Word(_p.alphanums + '_!<>#-+')  # a name for...
    number = _p.Word(_p.nums + ".")  # a number
    net = identifier  # a net
    nets = _p.Group(_p.OneOrMore(net('net') + ~_p.FollowedBy("=")
                                 | linebreak))  # many nets
    cktname = identifier  # name of a subcircuit
    cktname_end = _p.CaselessLiteral(".ends").suppress()
    comment = _p.Suppress("//" + _p.SkipTo(_p.LineEnd())) | _p.Suppress(
        "*" + _p.SkipTo(_p.LineEnd()))
    expression = _p.Word(_p.alphanums + '._*+-/()')
    inst_param_key = identifier + _p.Suppress("=")
    inst_param_value = expression('expression')
    inst_parameter = _p.Group(
        inst_param_key('name') +
        inst_param_value('value')).setResultsName('key')
    parameters = _p.Group(
        _p.ZeroOrMore(inst_parameter | linebreak)).setResultsName('parameters')
    instname = identifier
    instnets = _p.Group(
        _p.OneOrMore(net('net') + ~_p.FollowedBy("=") | linebreak))
    instance = _p.Group(
        instname('name') + instnets('instnets') + parameters +
        EOL).setResultsName('instance')
    subcircuit_content = _p.Group(
        _p.ZeroOrMore(instance | EOL | comment)).setResultsName('subnetlist')
    subcircuit = _p.Group(
        # matches subckt <name> <nets> <newline>
        _p.CaselessLiteral(".subckt").suppress() + cktname('name') +
        _p.Optional(nets('nets')) + EOL
        # matches the content of the subcircuit
        + subcircuit_content
        # matches ends <name> <newline>
        + cktname_end + _p.matchPreviousExpr(cktname).suppress() +
        EOL).setResultsName('subcircuit')
    topcircuit = _p.Group(
        # matches subckt <name> <nets> <newline>
        _p.CaselessLiteral(".topckt").suppress() + cktname('name') +
        _p.Optional(nets('nets')) + EOL
        # matches the content of the subcircuit
        + subcircuit_content
        # matches ends <name> <newline>
        + cktname_end + _p.matchPreviousExpr(cktname).suppress() +
        EOL).setResultsName('topcircuit')
    netlist_element = topcircuit | subcircuit | EOL | comment('comment')
    netlist = _p.ZeroOrMore(netlist_element) + _p.StringEnd()

    parameters.setParseAction(handle_parameters)
    instance.setParseAction(handle_instance)
    subcircuit.setParseAction(handle_subcircuit)
    topcircuit.setParseAction(handle_topcircuit)

    return netlist.parseString(netlist_string)
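
A sketch of the matchPreviousExpr device above: the name after '.ends' must
repeat whatever name the '.subckt' line introduced, otherwise parsing fails
(the grammar is trimmed to the name-matching part; nets and instances are
folded into the skipped body):

import pyparsing as pp

name = pp.Word(pp.alphanums + '_')('name')
block = (pp.CaselessLiteral('.subckt').suppress() + name +
         pp.SkipTo(pp.CaselessLiteral('.ends'))('body') +
         pp.CaselessLiteral('.ends').suppress() +
         pp.matchPreviousExpr(name).suppress())

result = block.parseString('.subckt amp in out\nr1 in out 1k\n.ends amp')
print(result['name'])  # amp; '.ends other' would raise a ParseException
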
Esempio n. 28
0
    def _add_imports_to_file_results(self, analysis) -> None:
        """Adds imports to file results. Since Swift has no direct include directives for files,
        we have to do a little workaround here:
        1. We extract all entities and entity names.
        2. We loop through all entity results / file results and check if an entity name is part of a file result's scanned tokens.
        If this is the case, we add an import dependency.

        Args:
            analysis (Analysis): A given analysis.
        """
        # TODO: add framework imports
        LOGGER.debug(f'adding imports to file results...')
        entity_results: Dict[str, AbstractEntityResult] = {}

        filtered_results: Dict[str, FileResult] = {
            k: v
            for (k, v) in self.results.items()
            if v.analysis is analysis and isinstance(v, AbstractFileResult)
        }

        # 1. extract entities
        result: FileResult
        for _, result in filtered_results.items():

            entity_keywords: List[str] = [
                SwiftParsingKeyword.CLASS.value,
                SwiftParsingKeyword.STRUCT.value
            ]
            entity_name = pp.Word(pp.alphanums)

            match_expression = (pp.Keyword(SwiftParsingKeyword.CLASS.value) | pp.Keyword(SwiftParsingKeyword.STRUCT.value)) + \
                (~pp.Keyword(SwiftParsingKeyword.VAR.value) & ~pp.Keyword(SwiftParsingKeyword.FUNC.value)) + \
                entity_name.setResultsName(CoreParsingKeyword.ENTITY_NAME.value) + \
                pp.Optional(pp.Keyword(CoreParsingKeyword.COLON.value)) + pp.SkipTo(pp.FollowedBy(SwiftParsingKeyword.OPEN_SCOPE.value))

            comment_keywords: Dict[str, str] = {
                CoreParsingKeyword.LINE_COMMENT.value:
                SwiftParsingKeyword.INLINE_COMMENT.value,
                CoreParsingKeyword.START_BLOCK_COMMENT.value:
                SwiftParsingKeyword.START_BLOCK_COMMENT.value,
                CoreParsingKeyword.STOP_BLOCK_COMMENT.value:
                SwiftParsingKeyword.STOP_BLOCK_COMMENT.value
            }
            entity_results_extracted_from_file = result.generate_entity_results_from_scopes(
                entity_keywords, match_expression, comment_keywords)

            # TODO: also add tokens from extensions

            entity_result: AbstractEntityResult
            for entity_result in entity_results_extracted_from_file:
                LOGGER.debug(f'{entity_result.entity_name=}')
                self._add_inheritance_to_entity_result(entity_result)
                entity_results[entity_result.entity_name] = entity_result

        # 2. if entity names are present in scanned tokens of file results, add to import dependencies
        for entity_name, entity_result in entity_results.items():
            for _, file_result in filtered_results.items():
                if entity_name in file_result.scanned_tokens and entity_result.scanned_file_name not in file_result.scanned_import_dependencies:

                    dependency = os.path.basename(
                        os.path.normpath(entity_result.scanned_file_name))

                    if self._is_dependency_in_ignore_list(
                            dependency, analysis):
                        LOGGER.debug(
                            f'ignoring dependency from {file_result.unique_name} to {dependency}'
                        )
                    else:
                        file_result.scanned_import_dependencies.append(
                            dependency)
                        LOGGER.debug(f'adding import: {dependency}')
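
A reduced sketch of the negative lookahead above: after 'class'/'struct' the
entity name may be any word except a further keyword such as 'var' or 'func'
(plain strings stand in for the SwiftParsingKeyword enum values):

import pyparsing as pp

entity = ((pp.Keyword('class') | pp.Keyword('struct')) +
          ~pp.Keyword('var') + ~pp.Keyword('func') +
          pp.Word(pp.alphanums)('entity_name'))

print(entity.parseString('class Foo {')['entity_name'])  # Foo
# entity.parseString('class var x = 1') raises a ParseException
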
Esempio n. 29
0
def get_ref_parser(settings):
    _ESCAPE = settings.escape_character
    _DOUBLE_ESCAPE = _ESCAPE + _ESCAPE

    _REF_OPEN, _REF_CLOSE = settings.reference_sentinels
    _REF_CLOSE_FIRST = _REF_CLOSE[0]
    _REF_ESCAPE_OPEN = _ESCAPE + _REF_OPEN
    _REF_ESCAPE_CLOSE = _ESCAPE + _REF_CLOSE
    _REF_DOUBLE_ESCAPE_OPEN = _DOUBLE_ESCAPE + _REF_OPEN
    _REF_DOUBLE_ESCAPE_CLOSE = _DOUBLE_ESCAPE + _REF_CLOSE
    _REF_EXCLUDES = _ESCAPE + _REF_OPEN + _REF_CLOSE

    _INV_OPEN, _INV_CLOSE = settings.export_sentinels
    _INV_CLOSE_FIRST = _INV_CLOSE[0]
    _INV_ESCAPE_OPEN = _ESCAPE + _INV_OPEN
    _INV_ESCAPE_CLOSE = _ESCAPE + _INV_CLOSE
    _INV_DOUBLE_ESCAPE_OPEN = _DOUBLE_ESCAPE + _INV_OPEN
    _INV_DOUBLE_ESCAPE_CLOSE = _DOUBLE_ESCAPE + _INV_CLOSE
    _INV_EXCLUDES = _ESCAPE + _INV_OPEN + _INV_CLOSE

    _EXCLUDES = _ESCAPE + _REF_OPEN + _REF_CLOSE + _INV_OPEN + _INV_CLOSE

    double_escape = pp.Combine(
        pp.Literal(_DOUBLE_ESCAPE) + pp.MatchFirst([
            pp.FollowedBy(_REF_OPEN),
            pp.FollowedBy(_REF_CLOSE),
            pp.FollowedBy(_INV_OPEN),
            pp.FollowedBy(_INV_CLOSE)
        ])).setParseAction(pp.replaceWith(_ESCAPE))

    ref_open = pp.Literal(_REF_OPEN).suppress()
    ref_close = pp.Literal(_REF_CLOSE).suppress()
    ref_not_open = ~pp.Literal(_REF_OPEN) + ~pp.Literal(
        _REF_ESCAPE_OPEN) + ~pp.Literal(_REF_DOUBLE_ESCAPE_OPEN)
    ref_not_close = ~pp.Literal(_REF_CLOSE) + ~pp.Literal(
        _REF_ESCAPE_CLOSE) + ~pp.Literal(_REF_DOUBLE_ESCAPE_CLOSE)
    ref_escape_open = pp.Literal(_REF_ESCAPE_OPEN).setParseAction(
        pp.replaceWith(_REF_OPEN))
    ref_escape_close = pp.Literal(_REF_ESCAPE_CLOSE).setParseAction(
        pp.replaceWith(_REF_CLOSE))
    ref_text = pp.CharsNotIn(_REF_EXCLUDES) | pp.CharsNotIn(_REF_CLOSE_FIRST,
                                                            exact=1)
    ref_content = pp.Combine(
        pp.OneOrMore(ref_not_open + ref_not_close + ref_text))
    ref_string = pp.MatchFirst(
        [double_escape, ref_escape_open, ref_escape_close,
         ref_content]).setParseAction(_tag_with(tags.STR))
    ref_item = pp.Forward()
    ref_items = pp.OneOrMore(ref_item)
    reference = (ref_open + pp.Group(ref_items) + ref_close).setParseAction(
        _tag_with(tags.REF))
    ref_item << (reference | ref_string)

    inv_open = pp.Literal(_INV_OPEN).suppress()
    inv_close = pp.Literal(_INV_CLOSE).suppress()
    inv_not_open = ~pp.Literal(_INV_OPEN) + ~pp.Literal(
        _INV_ESCAPE_OPEN) + ~pp.Literal(_INV_DOUBLE_ESCAPE_OPEN)
    inv_not_close = ~pp.Literal(_INV_CLOSE) + ~pp.Literal(
        _INV_ESCAPE_CLOSE) + ~pp.Literal(_INV_DOUBLE_ESCAPE_CLOSE)
    inv_escape_open = pp.Literal(_INV_ESCAPE_OPEN).setParseAction(
        pp.replaceWith(_INV_OPEN))
    inv_escape_close = pp.Literal(_INV_ESCAPE_CLOSE).setParseAction(
        pp.replaceWith(_INV_CLOSE))
    inv_text = pp.CharsNotIn(_INV_CLOSE_FIRST)
    inv_content = pp.Combine(pp.OneOrMore(inv_not_close + inv_text))
    inv_string = pp.MatchFirst(
        [double_escape, inv_escape_open, inv_escape_close,
         inv_content]).setParseAction(_tag_with(tags.STR))
    inv_items = pp.OneOrMore(inv_string)
    export = (inv_open + pp.Group(inv_items) + inv_close).setParseAction(
        _tag_with(tags.INV))

    text = pp.CharsNotIn(_EXCLUDES) | pp.CharsNotIn('', exact=1)
    content = pp.Combine(pp.OneOrMore(ref_not_open + inv_not_open + text))
    string = pp.MatchFirst(
        [double_escape, ref_escape_open, inv_escape_open,
         content]).setParseAction(_tag_with(tags.STR))

    item = reference | export | string
    line = pp.OneOrMore(item) + s_end
    return line.leaveWhitespace()
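
A self-contained sketch of the escape handling above, with '${' and '}'
standing in for settings.reference_sentinels: a replaceWith parse action
rewrites the escaped opener so it survives as literal text in the output:

import pyparsing as pp

escaped_open = pp.Literal('\\${').setParseAction(pp.replaceWith('${'))
reference = (pp.Literal('${').suppress() +
             pp.Word(pp.alphanums + ':_')('path') +
             pp.Literal('}').suppress())
text = pp.CharsNotIn('\\$')

line = pp.OneOrMore(escaped_open | reference | text)

print(line.parseString('a \\${literal} and ${real:ref}').asList())
# ['a ', '${', 'literal} and ', 'real:ref']
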
Esempio n. 30
0
    def _add_inheritance_to_entity_result(
            self, result: AbstractEntityResult) -> None:
        LOGGER.debug(
            f'extracting inheritance from entity result {result.entity_name}...'
        )
        for _, obj, following in self._gen_word_read_ahead(
                result.scanned_tokens):
            if obj == SwiftParsingKeyword.CLASS.value:
                read_ahead_string = self.create_read_ahead_string(
                    obj, following)

                entity_name = pp.Word(pp.alphanums)
                expression_to_match = pp.Keyword(SwiftParsingKeyword.CLASS.value) + entity_name.setResultsName(CoreParsingKeyword.ENTITY_NAME.value) + pp.Keyword(CoreParsingKeyword.COLON.value) + \
                    entity_name.setResultsName(CoreParsingKeyword.INHERITED_ENTITY_NAME.value) + pp.SkipTo(pp.FollowedBy(SwiftParsingKeyword.OPEN_SCOPE.value))

                try:
                    parsing_result = expression_to_match.parseString(
                        read_ahead_string)
                except Exception:
                    result.analysis.statistics.increment(
                        Statistics.Key.PARSING_MISSES)
                    LOGGER.warning(
                        f'warning: could not parse result {result=}')
                    LOGGER.warning(
                        f'next tokens: {[obj] + following[:AbstractParsingCore.Constants.MAX_DEBUG_TOKENS_READAHEAD.value]}'
                    )
                    continue

                if len(parsing_result) > 0:

                    if getattr(parsing_result,
                               CoreParsingKeyword.INHERITED_ENTITY_NAME.value
                               ) is not None and bool(
                                   getattr(
                                       parsing_result, CoreParsingKeyword.
                                       INHERITED_ENTITY_NAME.value)):

                        result.analysis.statistics.increment(
                            Statistics.Key.PARSING_HITS)
                        LOGGER.debug(
                            f'found inheritance class {getattr(parsing_result, CoreParsingKeyword.INHERITED_ENTITY_NAME.value)} for entity name: {getattr(parsing_result, CoreParsingKeyword.ENTITY_NAME.value)} and added to result'
                        )
                        result.scanned_inheritance_dependencies.append(
                            getattr(
                                parsing_result, CoreParsingKeyword.
                                INHERITED_ENTITY_NAME.value))
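
A minimal sketch of the named-results pattern above: setResultsName exposes
the matched pieces, which the code then reads back with getattr (plain strings
replace the keyword enums; the colon is simply suppressed here):

import pyparsing as pp

entity_name = pp.Word(pp.alphanums)
expression_to_match = (pp.Keyword('class') +
                       entity_name('entity_name') +
                       pp.Suppress(':') +
                       entity_name('inherited_entity_name') +
                       pp.SkipTo(pp.FollowedBy('{')))

parsing_result = expression_to_match.parseString('class Child: Parent {')
print(getattr(parsing_result, 'entity_name'),
      getattr(parsing_result, 'inherited_entity_name'))  # Child Parent
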