Example #1
    def get_fragment_grammar():

        # Match header [mapping]
        header = Suppress("[") + Suppress("mapping") + Suppress("]")

        # There are three possible patterns for mapping entries:
        #       obj:symbol (scheme)
        #       obj (scheme)
        #       * (scheme)
        obj = Fragment.ENTITY.setResultsName("object")
        symbol = Suppress(":") + Fragment.IDENTIFIER.setResultsName("symbol")
        scheme = Suppress("(") + Fragment.IDENTIFIER.setResultsName(
            "scheme") + Suppress(")")

        pattern1 = Group(obj + symbol + scheme)
        pattern2 = Group(obj + scheme)
        pattern3 = Group(
            Literal(Mapping.MAPPING_ALL_OBJECTS).setResultsName("object") +
            scheme)

        mapping_entry = pattern1 | pattern2 | pattern3

        # To simplify parsing, classify condition-mapping entry groups into two types: normal and default.
        # A normal group is one with a non-default condition; the default group is the one that contains
        # the default condition.
        mapping_entries = Group(
            ZeroOrMore(mapping_entry)).setResultsName("mappings")

        normal_condition = Suppress(":") + originalTextFor(
            SDKConfig.get_expression_grammar())
        default_condition = Optional(
            Suppress(":") + Literal(Mapping.DEFAULT_CONDITION))

        normal_group = Group(
            normal_condition.setResultsName("condition") + mapping_entries)
        default_group = Group(default_condition +
                              mapping_entries).setResultsName("default_group")

        normal_groups = Group(
            ZeroOrMore(normal_group)).setResultsName("normal_groups")

        # A mapping fragment definition can have zero or more normal groups and exactly one default group as the last entry.
        archive = Suppress("archive") + Suppress(
            ":") + Fragment.ENTITY.setResultsName("archive")
        entries = Suppress("entries") + Suppress(":") + (
            normal_groups + default_group).setResultsName("entries")

        mapping = Group(header + archive + entries)

        mapping.setParseAction(lambda t: Mapping(t[0].archive, t[0].entries))

        mapping.ignore("#" + restOfLine)

        return mapping
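A minimal usage sketch (the fragment text below is hypothetical, and the surrounding Fragment and Mapping classes from ESP-IDF's ldgen are assumed to be in scope):

# Hypothetical fragment text in the shape the grammar above accepts:
sample = """
[mapping]
archive: libfreertos.a
entries:
    * (noflash)
"""
# parsed = get_fragment_grammar().parseString(sample)  # parse action yields a Mapping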
Example #2
    def get_fragment_grammar():
        name = Fragment.IDENTIFIER
        header = Suppress("[") + Suppress("sections") + Suppress(":") + name.setResultsName("name") + Suppress("]")
        entry = Word(alphanums + "+" + ".")
        entries = Suppress("entries") + Suppress(":") + Group(OneOrMore(entry)).setResultsName("entries")

        sections = Group(header + entries)

        sections.setParseAction(lambda t: Sections(t[0].name, t[0].entries))

        sections.ignore("#" + restOfLine)

        return sections
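A usage sketch under the same assumptions (hypothetical fragment text; Fragment and Sections must be importable):

sample = """
[sections:text]
entries:
    .text+
    .literal+
"""
# parsed = get_fragment_grammar().parseString(sample)  # parse action yields a Sections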
Example #3
def parse_config_file(filepath):
    """
    Parse the NetScaler input configuration file.
    :param filepath: path to the NetScaler input configuration file
    :return: list of parsed command tokens
    """

    EOL = LineEnd().suppress()
    comment = Suppress("#") + Suppress(restOfLine) + EOL
    SOL = LineStart().suppress()
    blank_line = SOL + EOL
    result = []
    hyphen = Literal("-")
    not_hyphen_sign = ''.join(c for c in printables if c != '-')
    text = Word(not_hyphen_sign, printables)
    key = Word('-',
               printables).setParseAction(lambda t: t[0].replace('-', '', 1))
    val = originalTextFor(Optional(ZeroOrMore(text), default=None))
    option = Group(key + val)
    multi_word_names = quotedString
    q_obj = originalTextFor(Keyword('q{') + SkipTo(Keyword("}")))
    command = Group(
        OneOrMore(q_obj | multi_word_names | text) + ZeroOrMore(option))
    command.ignore(comment | blank_line)
    with open(filepath) as infile:
        line_no = 1
        print("Parsing Input Configuration...")
        lines = infile.readlines()
        total_lines = len(lines)
        for line in lines:
            try:
                tmp = command.parseString(line)
                tokens = tmp.asList()
                if tokens:
                    tokens[0].append(['line_no', str(line_no)])
                result += tokens
                line_no += 1
            except Exception as exception:
                line_no += 1
                LOG.error("Parsing error: " + line)
            msg = "Parsing started..."
            if line_no <= total_lines:
                ns_util.print_progress_bar(line_no,
                                           total_lines,
                                           msg,
                                           prefix='Progress',
                                           suffix='')
        return result
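A sketch of the input this command grammar targets (the configuration line below is a hypothetical NetScaler command; LOG and ns_util are assumed module-level helpers):

# Hypothetical line in ns.conf:
#   add lb vserver vs_http HTTP 10.0.0.1 80 -persistenceType COOKIEINSERT
# result = parse_config_file("ns.conf")
# -> [['add', 'lb', 'vserver', 'vs_http', 'HTTP', '10.0.0.1', '80',
#      ['persistenceType', 'COOKIEINSERT'], ['line_no', '1']]]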
Example #4
    def get_fragment_grammar():
        name = Fragment.IDENTIFIER
        header = Suppress("[") + Suppress("scheme") + Suppress(":") + name.setResultsName("name") + Suppress("]")

        # Scheme entry in the form 'sections -> target'
        sections = Fragment.IDENTIFIER
        target = Fragment.IDENTIFIER
        entry = Group(sections.setResultsName("sections") + Suppress("->") + target.setResultsName("target"))

        entries = Suppress("entries") + Suppress(":") + Group(OneOrMore(entry)).setResultsName("entries")

        scheme = Group(header + entries)

        scheme.setParseAction(lambda t: Scheme(t[0].name, t[0].entries))

        scheme.ignore("#" + restOfLine)

        return scheme
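A usage sketch (hypothetical fragment text; Fragment and Scheme are assumed importable):

sample = """
[scheme:noflash]
entries:
    text -> iram0_text
    rodata -> dram0_data
"""
# parsed = get_fragment_grammar().parseString(sample)  # parse action yields a Scheme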
Example #5
def parse_config_file(filepath):
    """
    Parse the NetScaler input configuration file.
    :param filepath: path to the NetScaler input configuration file
    :return: list of parsed command tokens
    """

    EOL = LineEnd().suppress()
    comment = Suppress("#") + Suppress(restOfLine) + EOL
    SOL = LineStart().suppress()
    blank_line = SOL + EOL
    result = []
    hyphen = Literal("-")
    not_hyphen_sign = ''.join(c for c in printables if c != '-')
    text = Word(not_hyphen_sign, printables)
    key = Word('-', printables).setParseAction(
        lambda t: t[0].replace('-', '', 1))
    val = originalTextFor(Optional(ZeroOrMore(text), default=None))
    option = Group(key + val)
    multi_word_names = quotedString
    q_obj = originalTextFor(Keyword('q{')+SkipTo(Keyword("}")))
    command = Group(OneOrMore(q_obj | multi_word_names | text) + ZeroOrMore(option))
    command.ignore(comment | blank_line)
    with open(filepath) as infile:
        line_no = 1
        print "Parsing Input Configuration..."
        lines = infile.readlines()
        total_lines = len(lines)
        for line in lines:
            try:
                tmp = command.parseString(line)
                tokens = tmp.asList()
                if tokens:
                    tokens[0].append(['line_no', str(line_no)])
                result += tokens
                line_no += 1
            except Exception as exception:
                line_no += 1
                LOG.error("Parsing error: " + line)
            msg = "Parsing started..."
            if line_no <= total_lines:
                ns_util.print_progress_bar(line_no, total_lines, msg,
                                           prefix='Progress', suffix='')
        return result
Example #6
def PartitionParser():
    start_ = Suppress('start') + Suppress('=') + Word(nums)
    size_ = Suppress('size') + Suppress('=') + Word(nums)
    id_ = Suppress('Id') + Suppress('=') + Word(nums)
    device_ = Word(alphas+nums+'/')
    comment_ = '#' + Optional(restOfLine)
    warning_ = 'Warning:' + Optional(restOfLine)
    unit_ = Literal('unit') + Optional(Suppress(':') + Word(alphas + nums)+ restOfLine)

    pinfo = start_ + Suppress(',') 
    pinfo += size_ + Suppress(',')
    pinfo += id_ + restOfLine
    partition = Group(device_ + Suppress(':') + pinfo)
    partition.ignore(comment_)
    partition.ignore(warning_)
    partition.ignore(unit_)
    #partition = ZeroOrMore(partition)
    
    return Dict(ZeroOrMore(partition))
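A usage sketch with a hypothetical sfdisk-style dump; the comment, warning, and unit lines are skipped by the ignore() calls above:

sample = """
# partition table of /dev/sda
unit: sectors

/dev/sda1 : start= 2048, size= 409600, Id=83
"""
# parts = PartitionParser().parseString(sample)
# parts['/dev/sda1'] -> ['2048', '409600', '83', ...]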
Example #7
    def get_fragment_grammar():

        # Match header [mapping]
        header = Suppress("[") + Suppress("mapping") + Suppress("]")

        # There are three possible patterns for mapping entries:
        #       obj:symbol (scheme)
        #       obj (scheme)
        #       * (scheme)
        obj = Fragment.ENTITY.setResultsName("object")
        symbol = Suppress(":") + Fragment.IDENTIFIER.setResultsName("symbol")
        scheme = Suppress("(") + Fragment.IDENTIFIER.setResultsName("scheme") + Suppress(")")

        pattern1 = Group(obj + symbol + scheme)
        pattern2 = Group(obj + scheme)
        pattern3 = Group(Literal(Mapping.MAPPING_ALL_OBJECTS).setResultsName("object") + scheme)

        mapping_entry = pattern1 | pattern2 | pattern3

        # To simplify parsing, classify condition-mapping entry groups into two types: normal and default.
        # A normal group is one with a non-default condition; the default group is the one that contains
        # the default condition.
        mapping_entries = Group(ZeroOrMore(mapping_entry)).setResultsName("mappings")

        normal_condition = Suppress(":") + originalTextFor(SDKConfig.get_expression_grammar())
        default_condition = Optional(Suppress(":") + Literal(Mapping.DEFAULT_CONDITION))

        normal_group = Group(normal_condition.setResultsName("condition") + mapping_entries)
        default_group = Group(default_condition + mapping_entries).setResultsName("default_group")

        normal_groups = Group(ZeroOrMore(normal_group)).setResultsName("normal_groups")

        # A mapping fragment definition can have zero or more normal groups and exactly one default group as the last entry.
        archive = Suppress("archive") + Suppress(":") + Fragment.ENTITY.setResultsName("archive")
        entries = Suppress("entries") + Suppress(":") + (normal_groups + default_group).setResultsName("entries")

        mapping = Group(header + archive + entries)

        mapping.setParseAction(lambda t: Mapping(t[0].archive, t[0].entries))

        mapping.ignore("#" + restOfLine)

        return mapping
Example #8
def PartitionParser():
    start_ = Suppress('start') + Suppress('=') + Word(nums)
    size_ = Suppress('size') + Suppress('=') + Word(nums)
    id_ = Suppress('Id') + Suppress('=') + Word(nums)
    device_ = Word(alphas + nums + '/')
    comment_ = '#' + Optional(restOfLine)
    warning_ = 'Warning:' + Optional(restOfLine)
    unit_ = Literal('unit') + Optional(
        Suppress(':') + Word(alphas + nums) + restOfLine)

    pinfo = start_ + Suppress(',')
    pinfo += size_ + Suppress(',')
    pinfo += id_ + restOfLine
    partition = Group(device_ + Suppress(':') + pinfo)
    partition.ignore(comment_)
    partition.ignore(warning_)
    partition.ignore(unit_)
    #partition = ZeroOrMore(partition)

    return Dict(ZeroOrMore(partition))
Example #9
    def parse_pabl(self, raw_pabl):
        INDENT = lineEnd.suppress() + empty + empty.copy().setParseAction(
            self.check_sub_indent)
        UNDENT = FollowedBy(empty).setParseAction(self.check_unindent)
        UNDENT.setParseAction(self.unindent)

        terminator = Literal(';').suppress()
        comment = Literal('#') + restOfLine
        item_name = Word(alphas, alphanums + '_')
        variable = Word(alphas, alphanums + '_.')
        variable_as = (variable + 'as' + item_name)

        stmt = Forward()
        suite = Group(
            OneOrMore(empty + stmt.setParseAction(self.check_peer_indent)))
        suite.ignore(comment)

        item_start = Literal('@item').suppress()
        item_end = Literal(':').suppress()
        permission_start = Literal('@permissions')

        item_decl = (item_start + item_name.setResultsName('item') + item_end)
        item_defn = Group(item_decl + INDENT + suite + UNDENT)

        permission_decl = (permission_start + Group(
            delimitedList(item_name).setResultsName('permissions')) + item_end)
        permission_defn = Group(permission_decl + INDENT + suite + UNDENT)

        fieldList = delimitedList(
            Group(variable_as) | variable
        ).setResultsName('fields') + terminator

        stmt << (item_defn | fieldList | Group(permission_defn))

        parseTree = suite.parseString(raw_pabl)

        return parseTree
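A hypothetical PABL snippet in the shape this grammar parses (parse_pabl is a method, so an instance of the enclosing class is assumed):

# raw_pabl = """
# @item user:
#     profile.name as display_name;
#     email;
# """
# tree = parser.parse_pabl(raw_pabl)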
Example #10
def processFile(fname):
    # Set the standard items, including what to ignore
    data, fout = fOpen(fname+'.txt', 'r', fname+'.csv')
    NL, date, time = setStds()
    level, date_line, time_line, source, eventID, taskCat, info = setLines(NL, date, time)
    irrelevant_data = MatchFirst(['-','"']) + restOfLine

    # Define what a record will look like
    record = Group((level + date_line + time_line + source + eventID + taskCat + info))
    record.ignore(irrelevant_data)

    # Find records in the text file
    records = OneOrMore(record).searchString(data)

    # Write the CSV header, then each record; commas are stripped from field values.
    fout.write("Level,Date,Time,EventID,TaskCategory,Info\n")
    for rec in records:
        for i in rec:
            # print(rec[1], rec[2])
            for index in range(len(i)):
                i[index] = i[index].replace(',','')
            fout.write("%(lvl)s, %(eDate)s, %(eTime)s, %(eID)s, %(tCat)s, %(info)s\n" % i)

    print "Processing Completed"
Example #11
    def get_fragment_grammar(sdkconfig, fragment_file):

        # Match header [mapping]
        header = Suppress("[") + Suppress("mapping") + Suppress("]")

        # There are three possible patterns for mapping entries:
        #       obj:symbol (scheme)
        #       obj (scheme)
        #       * (scheme)
        obj = Fragment.ENTITY.setResultsName("object")
        symbol = Suppress(":") + Fragment.IDENTIFIER.setResultsName("symbol")
        scheme = Suppress("(") + Fragment.IDENTIFIER.setResultsName(
            "scheme") + Suppress(")")

        pattern1 = Group(obj + symbol + scheme)
        pattern2 = Group(obj + scheme)
        pattern3 = Group(
            Literal(Mapping.MAPPING_ALL_OBJECTS).setResultsName("object") +
            scheme)

        mapping_entry = pattern1 | pattern2 | pattern3

        # To simplify parsing, classify condition-mapping entry groups into two types: normal and default.
        # A normal group is one with a non-default condition; the default group is the one that contains
        # the default condition.
        mapping_entries = Group(
            ZeroOrMore(mapping_entry)).setResultsName("mappings")

        normal_condition = Suppress(":") + originalTextFor(
            SDKConfig.get_expression_grammar())
        default_condition = Optional(
            Suppress(":") + Literal(DeprecatedMapping.DEFAULT_CONDITION))

        normal_group = Group(
            normal_condition.setResultsName("condition") + mapping_entries)
        default_group = Group(default_condition +
                              mapping_entries).setResultsName("default_group")

        normal_groups = Group(
            ZeroOrMore(normal_group)).setResultsName("normal_groups")

        # A mapping fragment definition can have zero or more normal groups and exactly one default group as the last entry.
        archive = Suppress("archive") + Suppress(
            ":") + Fragment.ENTITY.setResultsName("archive")
        entries = Suppress("entries") + Suppress(":") + (
            normal_groups + default_group).setResultsName("entries")

        mapping = Group(header + archive + entries)
        mapping.ignore("#" + restOfLine)

        def parsed_deprecated_mapping(pstr, loc, toks):
            fragment = Mapping()
            fragment.archive = toks[0].archive
            fragment.name = re.sub(r"[^0-9a-zA-Z]+", "_", fragment.archive)
            fragment.deprecated = True

            fragment.entries = set()
            condition_true = False
            for entries in toks[0].entries[0]:
                condition = next(iter(entries.condition.asList())).strip()
                condition_val = sdkconfig.evaluate_expression(condition)

                if condition_val:
                    for entry in entries[1]:
                        fragment.entries.add(
                            (entry.object,
                             None if entry.symbol == '' else entry.symbol,
                             entry.scheme))
                    condition_true = True
                    break

            if not fragment.entries and not condition_true:
                try:
                    entries = toks[0].entries[1][1]
                except IndexError:
                    entries = toks[0].entries[1][0]
                for entry in entries:
                    fragment.entries.add(
                        (entry.object,
                         None if entry.symbol == '' else entry.symbol,
                         entry.scheme))

            if not fragment.entries:
                fragment.entries.add(("*", None, "default"))

            dep_warning = str(
                ParseFatalException(
                    pstr, loc,
                    "Warning: Deprecated old-style mapping fragment parsed in file %s."
                    % fragment_file))

            print(dep_warning)
            return fragment

        mapping.setParseAction(parsed_deprecated_mapping)
        return mapping
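A hypothetical call site; sdkconfig is assumed to expose evaluate_expression(), as the parse action above requires:

# grammar = get_fragment_grammar(sdkconfig, "components/legacy/linker.lf")
# fragment = grammar.parseString(fragment_text)[0]  # a Mapping with deprecated=True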
Example #12
    :param string: the string from which the module has been parsed
    :param location: the index of `string` at which the parsed module starts
    :param tokens: the list of sections representing the parsed module

    :rtype: :class:`pynusmv.model.ModuleMetaClass`

    """

    from .model import ModuleMetaClass, Module as ModuleClass

    name = tokens[0]
    args = tokens[1]
    namespace = OrderedDict()
    namespace["NAME"] = name
    namespace["ARGS"] = args
    for section in tokens[2:]:
        if section.name not in namespace:
            namespace[section.name] = section.body
        else:
            update(namespace[section.name], section.body)
    return ModuleMetaClass(str(name), (ModuleClass,), namespace)


module.setParseAction(_create_module)

# Model declaration
comment = ("--" + restOfLine).suppress()
model = Group(OneOrMore(module))
model.ignore(comment)
Example #13
def extract_structs(in_fname, out_fname):
    """
    Extract a C struct and write its corresponding C compatible python object
    Args:
        in_fname: Filename to read structs from
        out_fname: Filename to write structs to.

    Returns:
        A dict of c_structs written.
    """
    global anon_types
    global c_struct
    with open(in_fname, 'r') as header:
        sample = header.read()
    identifier = Word(alphas + '_', alphanums + '_')
    integer = Word(alphanums + '_')
    struct_array = _lsq_bracket + integer('length') + _rsq_bracket
    pointer = Word('*', max=1)
    struct_param = Group(
        identifier('param_type') + Optional(pointer('ptr')) +
        identifier('param_name') + Optional(struct_array), )

    simple_param = Group(struct_param + _semi_colon)

    # Anonymous structs (nested)
    inner_param_list = Group(ZeroOrMore(simple_param('inner_list')))
    anonymous_struct_param = Group(
        _struct + Optional(identifier('inner_struct')) + _lcurl +
        inner_param_list('inner_param_list') + _rcurl +
        identifier('inner_param_name') + Optional(struct_array) +
        _semi_colon, )
    anonymous_struct_param.ignore(cppStyleComment)

    # Anonymous unions (nested)
    anonymous_union_param = Group(
        _union + Optional(identifier('inner_union')) + _lcurl +
        inner_param_list('inner_param_list') + _rcurl +
        identifier('inner_param_name') + Optional(struct_array) +
        _semi_colon, )

    # A struct can contain a list of
    # 1. simple param types params name;
    # 2. Anonymous nested structs.
    # 3. Anonymous nested unions.
    struct_list = Group(
        ZeroOrMore(
            simple_param('simple_param')
            | anonymous_struct_param('anon_struct')
            | anonymous_union_param('anon_union'), ), )

    struct = _struct + Optional(identifier('struct_prefix')) + _lcurl + \
             struct_list('list') + _rcurl + \
             Optional(identifier('struct_postfix')) + _semi_colon
    struct.ignore(cppStyleComment)

    for item, _, _ in struct.scanString(sample):  # item, start, stop
        struct_name = item.struct_postfix
        params = collections.OrderedDict()
        for parsed_entry in item.list:

            # Classify the struct member.
            if (parsed_entry.getName() == "anon_struct"):
                param_type, param_name, ptr, length = \
                    extract_inner_struct(parsed_entry, out_fname)
                anon_types[param_type] = parsed_entry
            elif (parsed_entry.getName() == "anon_union"):
                param_type, param_name, ptr, length = \
                    extract_inner_union(parsed_entry, out_fname)
                anon_types[param_type] = parsed_entry
            elif (parsed_entry.getName() == "simple_param"):
                param_type, param_name, ptr, length = \
                    extract_simple_param(parsed_entry)
            else:
                assert False, "Invalid parse"

            # Normalize the parameter type
            param_name, p_type = normalize_param(
                param_type,
                param_name,
                ptr,
                length,
            )
            params[param_name] = p_type
            c_struct[struct_name] = params

        # Write out the struct
        _write_class(out_fname, struct_name, params, "Structure")
    return c_struct
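A hypothetical header this grammar can scan (cppStyleComment blocks are ignored; _lcurl, _semi_colon, and friends are assumed module-level tokens):

# sample.h:
#   /* a packed record */
#   struct {
#       int x;
#       char *name;
#       struct { int a; } inner;
#   } point_t;
# structs = extract_structs("sample.h", "sample_out.py")
# structs["point_t"] -> OrderedDict mapping member names to normalized types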
Example #14
              lambda s, l, t: t.asList()) + RBRACE))
VARIABLE = (IDENTIFIER("name") + Optional(LBRACK + SIZE("size") + RBRACK))
SCALARTYPE = Or(map(Keyword, "int real logical flag char string".split()))
STRUCTMEMBERS = Forward()
STRUCTTYPE = Keyword("struct") + LBRACE + STRUCTMEMBERS("members") + RBRACE
DECLARATION = ((SCALARTYPE | STRUCTTYPE)("type") +
               Optional(LBRACK + SIZE + RBRACK) + VARIABLE)
STRUCTMEMBERS << Group(ZeroOrMore(Group(DECLARATION + SEMI)))
DECL_ASS_STMT = DECLARATION + Optional(EQUAL + VALUE("value")) + SEMI

SECTION = (Keyword('section') + IDENTIFIER('name') + LBRACE +
           Group(OneOrMore(Group(DECL_ASS_STMT)))('declarations') + RBRACE +
           SEMI)
COMMENT = '#' + restOfLine  # todo: could potentially match '#' within strings?
CONFIG = Group(OneOrMore(Group(SECTION)))
CONFIG.ignore(COMMENT)


def walk(ns, declaration, value):
    # walks through declaration tokens recursively and constructs namespace
    if declaration.type[0] == 'struct':
        if declaration.size:
            subdecs_vals = []
            for i, v in enumerate(value):
                fake_declaration = declaration.copy()
                fake_declaration.name = i
                fake_declaration.size = 0  # prevents infinite recursion
                subdecs_vals.append((fake_declaration, v))

            ns[declaration.name] = [None] * len(value)
        else:
Example #15
            | constKwd + structArgument \
            | enumKwd + enumName + argumentName \
            | constCommonArgument \
            | commonArgument \
            | typedefName + argumentName \
            | voidKwd

funName = Word(alphas)
argumentList = Optional(delimitedList(Group(funArgument)))
# function declaration
function = Group(funReturnType).setResultsName('returnType') \
         + funName.setResultsName('functionName') \
         + '(' \
         + argumentList.setResultsName('arguments') \
         + ');'
function.ignore(cStyleComment)

# function typedef
funTypedefName = funName
funTypedef = typedefKwd \
           + Group(funReturnType).setResultsName('returnType') \
           + '(' \
           + pointerSymbol \
           + funTypedefName.setResultsName('functionName') \
           + ')' \
           + '(' \
           + argumentList.setResultsName('arguments') \
           + ');'
funTypedef.ignore(cStyleComment)
declaration = funTypedef.setResultsName('result') | 'DLL_PUBLIC' + function.setResultsName('result')
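Hypothetical declarations the combined grammar should match, assuming the elided tokens (funReturnType, typedefKwd, pointerSymbol, ...) follow the usual C conventions:

# DLL_PUBLIC int add(int a, int b);
# typedef void (*callback_t)(void);
# result = declaration.parseString("DLL_PUBLIC int add(int a, int b);")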
Example #16
def parse(string=None, filename=None, token=None, lang=None):
    """
    Parse a token stream from or raise a SyntaxError

    This function includes the parser grammar.
    """

    if not lang:
        lang = guess_language(string, filename)

    #
    # End of Line
    #
    EOL = Suppress(lineEnd)
    UTFWORD = Word(unicodePrintables)

    #
    # @tag
    #
    TAG = Suppress('@') + UTFWORD

    #
    # A table
    #
    # A table is made up of rows of cells, e.g.
    #
    #   | column 1 | column 2 |
    #
    # Table cells need to be able to handle escaped tokens such as \| and \n
    #
    def handle_esc_char(tokens):
        token = tokens[0]

        if token == r'\|':
            return u'|'
        elif token == r'\n':
            return u'\n'
        elif token == r'\\':
            return u'\\'

        raise NotImplementedError(u"Unknown token: %s" % token)

    ESC_CHAR = Word(initChars=r'\\', bodyChars=unicodePrintables, exact=2)
    ESC_CHAR.setParseAction(handle_esc_char)

    #
    # A cell can contain anything except a cell marker, new line or the
    # beginning of a cell marker, we then handle escape characters separately
    # and recombine the cell afterwards
    #
    CELL = OneOrMore(CharsNotIn('|\n\\') + Optional(ESC_CHAR))
    CELL.setParseAction(lambda tokens: u''.join(tokens))

    TABLE_ROW = Suppress('|') + OneOrMore(CELL + Suppress('|')) + EOL
    TABLE_ROW.setParseAction(lambda tokens: [v.strip() for v in tokens])
    TABLE = Group(OneOrMore(Group(TABLE_ROW)))

    #
    # Multiline string
    #
    def clean_multiline_string(s, loc, tokens):
        """
        Clean a multiline string

        The indent level of a multiline string is the indent level of the
        triple-". We have to derive this by walking backwards from the
        location of the quoted string token to the newline before it.

        We also want to remove the leading and trailing newline if they exist.

        FIXME: assumes UNIX newlines
        """

        def remove_indent(multiline, indent):
            """
            Generate the lines removing the indent
            """

            for line in multiline.splitlines():
                if line and not line[:indent].isspace():
                    warn("%s: %s: under-indented multiline string "
                         "truncated: '%s'" %
                         (lineno(loc, s), col(loc, s), line),
                         LettuceSyntaxWarning)

                # for those who are surprised by this, slicing a string
                # shorter than indent will yield empty string, not IndexError
                yield line[indent:]

        # determine the indentation offset
        indent = loc - s.rfind('\n', 0, loc) - 1

        multiline = '\n'.join(remove_indent(tokens[0], indent))

        # remove leading and trailing newlines
        if multiline[0] == '\n':
            multiline = multiline[1:]

        if multiline[-1] == '\n':
            multiline = multiline[:-1]

        return multiline

    MULTILINE = QuotedString('"""', multiline=True)
    MULTILINE.setParseAction(clean_multiline_string)

    # A Step
    #
    # Steps begin with a keyword such as Given, When, Then or And. They can
    # contain an optional inline comment, although it's possible to encapsulate
    # it in a string. Finally they can contain a table or a multiline 'Python'
    # string.
    #
    # <variables> are not parsed as part of the grammar as it's not easy to
    # distinguish between a variable and XML. Instead scenarios will replace
    # instances in the steps based on the outline keys.
    #
    STATEMENT_SENTENCE = Group(
        lang.STATEMENT +  # Given, When, Then, And
        OneOrMore(UTFWORD.setWhitespaceChars(' \t') |
                  quotedString.setWhitespaceChars(' \t')) +
        EOL
    )

    STATEMENT = Group(
        STATEMENT_SENTENCE('sentence') +
        Optional(TABLE('table') | MULTILINE('multiline'))
    )
    STATEMENT.setParseAction(Step)

    STATEMENTS = Group(ZeroOrMore(STATEMENT))

    #
    # Background:
    #
    BACKGROUND_DEFN = \
        lang.BACKGROUND('keyword') + Suppress(':') + EOL
    BACKGROUND_DEFN.setParseAction(Background)

    BACKGROUND = Group(
        BACKGROUND_DEFN('node') +
        STATEMENTS('statements')
    )
    BACKGROUND.setParseAction(Background.add_statements)

    #
    # Scenario: description
    #
    SCENARIO_DEFN = Group(
        Group(ZeroOrMore(TAG))('tags') +
        lang.SCENARIO('keyword') + Suppress(':') +
        restOfLine('name') +
        EOL
    )
    SCENARIO_DEFN.setParseAction(Scenario)

    SCENARIO = Group(
        SCENARIO_DEFN('node') +
        STATEMENTS('statements') +
        Group(ZeroOrMore(
            Suppress(lang.EXAMPLES + ':') + EOL + TABLE
        ))('outlines')
    )
    SCENARIO.setParseAction(Scenario.add_statements)

    #
    # Feature: description
    #
    FEATURE_DEFN = Group(
        Group(ZeroOrMore(TAG))('tags') +
        lang.FEATURE('keyword') + Suppress(':') +
        restOfLine('name') +
        EOL
    )
    FEATURE_DEFN.setParseAction(Feature)

    #
    # A description composed of zero or more lines, before the
    # Background/Scenario block
    #
    DESCRIPTION_LINE = Group(
        ~BACKGROUND_DEFN + ~SCENARIO_DEFN +
        OneOrMore(UTFWORD).setWhitespaceChars(' \t') +
        EOL
    )
    DESCRIPTION = Group(ZeroOrMore(DESCRIPTION_LINE | EOL))
    DESCRIPTION.setParseAction(Description)

    #
    # Complete feature file definition
    #
    FEATURE = Group(
        FEATURE_DEFN('node') +
        DESCRIPTION('description') +
        Optional(BACKGROUND('background')) +
        Group(OneOrMore(SCENARIO))('scenarios') +
        stringEnd)
    FEATURE.ignore(pythonStyleComment)
    FEATURE.setParseAction(Feature.add_blocks)

    #
    # Try parsing the string
    #

    if not token:
        token = FEATURE
    else:
        token = locals()[token]

    try:
        if string:
            tokens = token.parseString(string)
        elif filename:
            with open(filename, 'r', encoding='utf-8') as fp:
                tokens = token.parseFile(fp)
        else:
            raise RuntimeError("Must pass string or filename")

        return tokens
    except ParseException as e:
        if e.parserElement == stringEnd:
            msg = "Expected EOF (max one feature per file)"
        else:
            msg = e.msg

        raise LettuceSyntaxError(
            filename,
            u"{lineno}:{col} Syntax Error: {msg}\n{line}\n{space}^".format(
                msg=msg,
                lineno=e.lineno,
                col=e.col,
                line=e.line,
                space=' ' * (e.col - 1)))
    except LettuceSyntaxError as e:
        # reraise the exception with the filename
        raise LettuceSyntaxError(filename, e.string)
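A usage sketch with hypothetical feature text (lang, Step, Scenario, and Feature come from the surrounding module):

# feature_text = u'''
# Feature: parsing demo
#     A one-line description.
#
#     Scenario: first scenario
#         Given a precondition
#         Then an outcome
# '''
# tokens = parse(string=feature_text)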
Example #17
    :param string: the string from which the module has been parsed
    :param location: the index of `string` at which the parsed module starts
    :param tokens: the list of sections representing the parsed module

    :rtype: :class:`pynusmv.model.ModuleMetaClass`

    """
    # pylint: disable=unused-argument

    from .model import ModuleMetaClass, Module as ModuleClass

    name = tokens[0]
    args = tokens[1]
    namespace = OrderedDict()
    namespace["NAME"] = name
    namespace["ARGS"] = args
    for section in tokens[2:]:
        if section.name not in namespace:
            namespace[section.name] = section.body
        else:
            update(namespace[section.name], section.body)
    return ModuleMetaClass(str(name), (ModuleClass, ), namespace)


module.setParseAction(_create_module)

# Model declaration
comment = ("--" + restOfLine).suppress()
model = Group(OneOrMore(module))
model.ignore(comment)
Example #18
# Plugin Attribute
#
attrName = nameType
attrValue = integer ^ real ^ color ^ acolor ^ nameType ^ output ^ quotedString.setParseAction(no_quotes)

pluginAttr = Group(attrName + equals + attrValue + semi)

# Plugin
#
pluginType = Word(alphanums)
pluginName = Word(alphanums + "@_")

pluginDesc = Group(pluginType + pluginName + lbrace + Group(ZeroOrMore(pluginAttr)) + rbrace).setParseAction(
    getPluginDesc
)
pluginDesc.ignore("//" + restOfLine)
pluginDesc.ignore(cStyleComment)

# Scene
#
sceneDesc = OneOrMore(pluginDesc)
sceneDesc.ignore("//" + restOfLine)
sceneDesc.ignore(cStyleComment)

nameParser = ZeroOrMore(Group(pluginType + pluginName + lbrace))
nameParser.ignore("//" + restOfLine)
nameParser.ignore(cStyleComment)


def ParseVrscene(filepath):
    return sceneDesc.parseString(open(filepath, "r").read())
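A hypothetical .vrscene snippet in the shape the plugin grammar accepts (lbrace, equals, semi, and the value tokens are assumed module-level definitions):

# // render output settings
# SettingsOutput vraySettingsOutput {
#   img_width=800;
#   img_height=600;
# }
# scene = ParseVrscene("scene.vrscene")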