Example No. 1
def get_sql_grammar():
    '''
    Creates a grammar for parsing SQL queries.

    Returns:
       MatchFirst: SQL parser.
    '''
    select = pp.Regex('select', flags=re.I) \
        .setParseAction(lambda s, l, t: 'select') \
        .setResultsName('operator')
    from_ = pp.Suppress(pp.Regex('from', flags=re.I))
    table = (from_ + pp.Regex('[a-z]+', flags=re.I)) \
        .setParseAction(lambda s, l, t: t[0]) \
        .setResultsName('table')
    regex = pp.Regex('~|regex').setParseAction(lambda s, l, t: '~')
    not_regex = pp.Regex('!~|not regex').setParseAction(lambda s, l, t: '!~')
    any_op = pp.Regex('[^ ]*')
    operator = pp.Or([not_regex, regex, any_op]).setResultsName('operator')
    quote = pp.Suppress(pp.Optional("'"))
    value = (quote + pp.Regex('[^\']+', flags=re.I) + quote) \
        .setResultsName('value') \
        .setParseAction(lambda s, l, t: t[0])
    columns = pp.delimitedList(pp.Regex('[^, ]*'), delim=pp.Regex(', *')) \
        .setResultsName('display_columns')
    column = pp.Regex('[a-z]+', flags=re.I).setResultsName('column')
    conditional = column + operator + value
    head = select + columns + table
    grammar = head | conditional
    return grammar
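A minimal usage sketch (not part of the original snippet), assuming the function's module has `import re` and `import pyparsing as pp` at the top; the query below is made up:

import re               # get_sql_grammar() expects `re` and `pp` at module level
import pyparsing as pp

grammar = get_sql_grammar()
result = grammar.parseString("select name, age from users")
print(result["operator"])               # select
print(list(result["display_columns"]))  # ['name', 'age']
print(result["table"])                  # users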
Example No. 2
def _MakeReactionSideParser():
    """Builds a parser for a side of a reaction."""
    # Coefficients are usually integral, but they can be floats or fractions too.
    int_coeff = pyparsing.Word(pyparsing.nums)
    float_coeff = pyparsing.Word(pyparsing.nums + '.' + pyparsing.nums)
    frac_coeff = int_coeff + '/' + int_coeff
    int_coeff.setParseAction(lambda i:int(i[0]))
    float_coeff.setParseAction(lambda t:float(t[0]))
    frac_coeff.setParseAction(lambda f:float(f[0])/float(f[2]))
    
    coeff = pyparsing.Or([int_coeff, float_coeff, frac_coeff])
    optional_coeff = pyparsing.Optional(coeff)
    
    compound_separator = pyparsing.Literal('+').suppress()
    
    compound_name_component = pyparsing.Word(pyparsing.alphanums + "()",
                                             pyparsing.alphanums + "-+,()'_")
    compound_name = pyparsing.Forward()
    compound_name << (compound_name_component + pyparsing.ZeroOrMore(compound_name_component))
    compound_name.setParseAction(lambda s: ' '.join(s))
    
    compound_with_coeff = pyparsing.Forward()
    compound_with_coeff << ((optional_coeff + compound_name) | compound_name)
    compound_with_coeff.setParseAction(_parsedCompound)
    compound_with_coeff = compound_with_coeff.setResultsName("compound")  # setResultsName returns a copy, so keep it
    
    compound_with_separator = pyparsing.Forward()
    compound_with_separator << (compound_with_coeff + compound_separator)
    
    reaction_side = pyparsing.Forward()
    reaction_side << (pyparsing.ZeroOrMore(compound_with_separator) +
                      compound_with_coeff)
    reaction_side.setParseAction(lambda l: [l])
    reaction_side = reaction_side.setResultsName("reaction_side")  # keep the named copy returned by setResultsName
    return reaction_side
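`_parsedCompound` is a parse action defined elsewhere in the original module; a minimal sketch of exercising the parser with a hypothetical stand-in for it (assuming `import pyparsing`):

import pyparsing

def _parsedCompound(tokens):
    # hypothetical stand-in; the real parse action lives elsewhere in the module
    return ' '.join(str(t) for t in tokens)

side = _MakeReactionSideParser()
print(side.parseString("2 H2O + CO2").asList())
# -> [['2 H2O', 'CO2']] with this stand-in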
Example No. 3
    def test_none_name2(self):
        grammar = pp.Or(["foo", "bar"]) + pp.Word(pp.nums).setName("integer")
        railroad = to_railroad(grammar)
        assert len(railroad) == 2
        assert railroad[0].name is not None
        railroad = to_railroad(grammar, show_results_names=True)
        assert len(railroad) == 2
Example No. 4
def expression_parser():
  """A function returning a (pyparsing) parser for parsing C expressions.

  Returns:
    a (pyparsing) parser for parsing C expressions.
  """
  precedence = []
  for operators, arity, associativity in _PRECEDENCE:
    if arity <= 2:
      operators = pyparsing.Or(map(pyparsing.Literal, operators))
    else:
      operators = tuple(map(pyparsing.Literal, operators))
    precedence.append((
        operators,
        arity,
        associativity,
        _construct_operator(arity),
    ))
  expression = pyparsing.Forward()
  # pylint: disable=expression-not-assigned
  expression << pyparsing.infixNotation(
      baseExpr=_base_or_array_expression(expression),
      opList=precedence,
      lpar=pyparsing.NoMatch(),
      rpar=pyparsing.NoMatch(),
  )
  expression.ignore(pyparsing.cppStyleComment)
  return expression
Example No. 5
def _get_anchor(flag=False):
    """Get anchor parser element.

    :param bool flag: toggle debug messages on/off

    anchor = "#", heading_text | "top";

    :returns: anchor
    :rtype: ParserElement
    """
    try:
        heading_text = layout.get_heading_text(flag=flag)
        top = pyparsing.Literal("top")
        anchor = pyparsing.Combine(
            pyparsing.Literal("#")
            + pyparsing.Or((heading_text, top))
        )
        anchor.leaveWhitespace()
        anchor.parseWithTabs()
        if flag:
            anchor.setDebug()
        anchor.setName("anchor")
        anchor = anchor.setResultsName("anchor")
    except Exception as exception:
        msg = "failed to get anchor parser element:{}".format(exception)
        raise RuntimeError(msg)
    return anchor
Example No. 6
    def __init__(self, prompt=None, intro=None):
        """
        Initialize Cmd members.

        Args:
            prompt (str): The command-line prompt to display
            intro (str): The program introduction banner

        Returns:
            Nothing
        """
        super().__init__(allow_cli_args=False, allow_redirection=False)
        self.prompt = prompt
        self.intro = intro
        self.commentGrammars = pyparsing.Or([])  # No C or Python comment parsing please
        self.redirector = "\x01"
        self.allow_redirection = False
        self.locals_in_py = False
        # set_use_arg_list(True)
        # set_strip_quotes(True)
        self.del_defaultcmds()

        # Initialize Cli members
        self.tsuite = TestSuite()
        self.subcmds = list(self.tsuite.keys())
        self.subcmds.sort()
        self.runp = argparse.ArgumentParser(
            prog="run",
            description="Executes a plugin (test case)",
            add_help=False)
        self.runp.add_argument(
            "plugin", help="The plugin to execute along with its arguments")
Example No. 7
    def _compile_alternative(self, element, src_state, dst_state, grammar,
                             kaldi_rule, fst):
        matchers = []
        for child in element.children:
            matchers.append(
                self.compile_element(child, src_state, dst_state, grammar,
                                     kaldi_rule, fst))
        return pp.Or(tuple(matchers))
Example No. 8
def build_attribute_parser():
    """Build a parser for attribute lines of the form ``name [= default] : type # comment``."""
    # pp.Or expects a single iterable of alternatives; passing them as two
    # positional arguments would silently drop the single-quoted variant.
    quoted = pp.Or([pp.QuotedString('"'), pp.QuotedString("'")])
    colon = pp.Literal(':').suppress()
    attribute_name = pp.Word(pp.srange('[a-z]'), pp.srange('[a-z0-9_]')).setResultsName('name')
    data_type = pp.Combine(pp.Word(pp.alphas) + pp.SkipTo("#", ignore=quoted)).setResultsName('type')
    default = pp.Literal('=').suppress() + pp.SkipTo(colon, ignore=quoted).setResultsName('default')
    comment = pp.Literal('#').suppress() + pp.restOfLine.setResultsName('comment')
    return attribute_name + pp.Optional(default) + colon + data_type + comment
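A quick sketch of the input format this parser expects (assuming `import pyparsing as pp`; the attribute line is made up):

import pyparsing as pp

parser = build_attribute_parser()
res = parser.parseString("weight = 0 : double # weight of the subject")
print(res["name"], res["default"].strip(), res["type"].strip(), res["comment"].strip())
# -> weight 0 double weight of the subject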
Example No. 9
def asdict(string, delim=','):
    """Return dictionary from given string in ini format."""

    if string.strip() == '':
        return {}

    import ast
    import pyparsing

    pp_num = pyparsing.Word(pyparsing.nums + '.')
    pp_str = pyparsing.quotedString
    pp_bool = pyparsing.Or(
        pyparsing.Word("True") | pyparsing.Word("False"))
    pp_key = pyparsing.Word(pyparsing.alphas + "_",
        pyparsing.alphanums + "_.")
    pp_val = pyparsing.Or(pp_num | pp_str | pp_bool)

    # try dictionary dialect "<key> = <value>, ..."
    pp_term = pyparsing.Group(pp_key + '=' + pp_val)
    pp_term_lists = pp_term + pyparsing.ZeroOrMore(delim + pp_term)
    try:
        terms = pp_term_lists.parseString(string.strip('{}'))
    except pyparsing.ParseBaseException:
        terms = None

    # try dictionary dialect "'<key>': <value>, ..."
    if terms is None:
        pp_term = pyparsing.Group(pp_str + ':' + pp_val)
        pp_term_lists = pp_term + pyparsing.ZeroOrMore(delim + pp_term)
        try:
            terms = pp_term_lists.parseString(string.strip('{}'))
        except pyparsing.ParseBaseException:
            return {}

    # create dictionary
    dictionary = {}
    for item in terms:
        if len(item) == 1:
            if item[0] == ',': continue
            dictionary[item] = True
            continue
        try:
            key = item[0].strip('\'\"')
            # literal_eval accepts the same literals (numbers, quoted strings,
            # True/False) without the risks of eval()
            value = ast.literal_eval(item[2])
        except Exception:
            continue

        dictionary[key] = value
        if isinstance(dictionary[key], str):
            dictionary[key] = dictionary[key].strip()

    return dictionary
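A small sketch of the two dialects the helper accepts (the sample strings are made up):

print(asdict("a = 1, b = 'two', flag = True"))
# -> {'a': 1, 'b': 'two', 'flag': True}
print(asdict("{'x': 1.5, 'y': '0 2'}"))
# -> {'x': 1.5, 'y': '0 2'}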
Example No. 10
    def _get_complete_expression(
            self,
            name_for_expression_after_equal: str) -> pyparsing.ParserElement:
        ''' helper that returns the same pyparsing expression but with a different 'name'
        for the part after the equal sign

        for types, the part after the `=` is the abstract class that the type extends

        for functions, the part after the `=` is the result type
        '''

        # everything in a line that is past the initial 'name' of the class/type that is being defined
        pe_params_list_equal_sign_and_extends_from = \
            self.pe_zero_or_more_params + \
            self.pe_equal_sign_literal.suppress() + \
            self.pe_class_name(name_for_expression_after_equal) + \
            self.pe_semicolon_literal

        # the full line definition for a line that is describing a tdlib type/class (aka not a comment)
        pe_full_line_tdlib_type_def = \
            self.pe_tdlib_class_name + \
            pe_params_list_equal_sign_and_extends_from

        # set parser action to add a 'note' that this is a `DEFINITION` line
        pe_full_line_tdlib_type_def.addParseAction(
            self._setTlLineTypeStringParseAction(
                model.TlFileLineType.DEFINITION))

        # set parser action to add the line and column numbers
        pe_full_line_tdlib_type_def.addParseAction(
            self._setLineAndLineNumberParseAction)


        pe_full_comment_line = \
            self.pe_comment_literal + \
            pyparsing.restOfLine(constants.RESULT_NAME_COMMENT_TEXT)

        # set parser action to add a 'note' that this is a `COMMENT` line
        pe_full_comment_line.addParseAction(
            self._setTlLineTypeStringParseAction(model.TlFileLineType.COMMENT))

        # set parser action to add the line and column numbers
        pe_full_comment_line.addParseAction(
            self._setLineAndLineNumberParseAction)

        to_return = \
            pyparsing.ZeroOrMore(
                pyparsing.Group(
                    pyparsing.Or(
                            [pe_full_line_tdlib_type_def,
                            pe_full_comment_line]
                        )
                    )
                )

        return to_return
Example No. 11
    def _compile_list_ref(self, element, src_state, dst_state, grammar, kaldi_rule, fst):
        # list_rule_name = "__list_%s" % element.list.name
        if element.list not in grammar.lists:
            # Should only happen during initial compilation; during updates, we must skip this
            grammar.add_list(element.list)
        self.kaldi_rules_by_listreflist_dict[id(element.list)].add(kaldi_rule)
        matchers = []
        for child_str in element.list.get_list_items():
            matchers.append(self._compile_literal(MockLiteral(child_str.split()), src_state, dst_state, grammar, kaldi_rule, fst))
        return pp.Or(tuple(matchers))
Example No. 12
def _MakeReactionParser():
    """Builds a pyparsing-based recursive descent parser for chemical reactions."""
    reaction_side = _MakeReactionSideParser()
    
    side_separators = [pyparsing.Literal(s) for s in POSSIBLE_REACTION_ARROWS]
    side_separator = pyparsing.Or(side_separators).suppress()
    
    reaction = pyparsing.Forward()
    reaction << (reaction_side + side_separator + reaction_side)
    return reaction
Example No. 13
    def parse(self, atom, named_formula):
        result = self.bnf.parseString(atom)
        result = {'name': result['name'], 'vars': list(result.get('vars', []))}

        if result['name'] in self.all_atoms:
            raise PieceNameError

        FormulaGrammar(self.all_atoms, pp.Or([pp.Literal(var) for var in result['vars']]))\
            .bnf.parseString(named_formula, True)

        return result
Example No. 14
def parse_turbo_gradient(path):
    results = {}
    gradient_fn = glob.glob(os.path.join(path, "gradient"))
    if not gradient_fn:
        raise Exception("gradient file not found!")
    assert len(gradient_fn) == 1
    gradient_fn = gradient_fn[0]
    with open(gradient_fn) as handle:
        text = handle.read()

    float_ = make_float_class()
    cycle = pp.Word(pp.nums).setResultsName("cycle")
    scf_energy = float_.setResultsName("scf_energy")
    grad_norm = float_.setResultsName("grad_norm")
    float_line = float_ + float_ + float_
    coord_line = pp.Group(float_line + pp.Word(pp.alphas))
    grad_line = pp.Group(float_line)
    cart_grads = pp.Literal("cartesian gradients")
    energy_type = pp.Or((pp.Literal("SCF energy"),
                        pp.Literal("ex. state energy"),
                        pp.Literal("CC2 energy"),
                        pp.Literal("ADC(2) energy"),
                        pp.Literal("MP2 energy"),
    ))

    parser = (
        pp.Or((pp.Literal("$grad"), pp.Literal("$gradient"))) + pp.Optional(cart_grads) +
        pp.Literal("cycle =") + cycle +
        energy_type + pp.Literal("=") + scf_energy +
        pp.Literal("|dE/dxyz| =") + grad_norm +
        pp.OneOrMore(coord_line).setResultsName("coords") +
        pp.OneOrMore(grad_line).setResultsName("grad") +
        pp.Literal("$end")
    )
    parsed = parser.parseString(text)
    gradient = np.array(parsed["grad"].asList()).flatten()

    results["energy"] = parsed["scf_energy"]
    results["forces"] = -gradient
    return results
Example No. 15
def compile_attribute(line, in_key=False):
    """
    Convert attribute definition from DataJoint format to SQL

    :param line: attribution line
    :param in_key: set to True if attribute is in primary key set
    :returns: (name, sql) -- attribute name and sql code for its declaration
    """
    quoted = pp.Or([pp.QuotedString('"'), pp.QuotedString("'")])  # pp.Or expects one iterable of alternatives
    colon = pp.Literal(':').suppress()
    attribute_name = pp.Word(pp.srange('[a-z]'),
                             pp.srange('[a-z0-9_]')).setResultsName('name')

    data_type = pp.Combine(pp.Word(pp.alphas) +
                           pp.SkipTo("#", ignore=quoted)).setResultsName(
                               'type')
    default = pp.Literal('=').suppress() + pp.SkipTo(
        colon, ignore=quoted).setResultsName('default')
    comment = pp.Literal('#').suppress() + pp.restOfLine.setResultsName(
        'comment')

    attribute_parser = attribute_name + pp.Optional(
        default) + colon + data_type + comment

    match = attribute_parser.parseString(line + '#', parseAll=True)
    match['comment'] = match['comment'].rstrip('#')
    if 'default' not in match:
        match['default'] = ''
    match = {k: v.strip() for k, v in match.items()}
    match['nullable'] = match['default'].lower() == 'null'

    literals = ['CURRENT_TIMESTAMP']  # not to be enclosed in quotes
    if match['nullable']:
        if in_key:
            raise DataJointError(
                'Primary key attributes cannot be nullable in line %s' % line)
        match[
            'default'] = 'DEFAULT NULL'  # nullable attributes default to null
    else:
        if match['default']:
            quote = match['default'].upper(
            ) not in literals and match['default'][0] not in '"\''
            match['default'] = ('NOT NULL DEFAULT ' +
                                ('"%s"' if quote else "%s") % match['default'])
        else:
            match['default'] = 'NOT NULL'
    match['comment'] = match['comment'].replace(
        '"', '\\"')  # escape double quotes in comment
    sql = ('`{name}` {type} {default}' +
           (' COMMENT "{comment}"' if match['comment'] else '')).format(
               **match)
    return match['name'], sql
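For illustration, a rough sketch of the conversion on a made-up attribute line (assuming the module imports `pyparsing as pp` and defines `DataJointError`):

name, sql = compile_attribute("weight = 0 : double # weight of the subject", in_key=False)
print(name)  # weight
print(sql)   # `weight` double NOT NULL DEFAULT "0" COMMENT "weight of the subject"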
Example No. 16
    def definePackagePattern(self):
        first = pp.Word(pp.alphas+"_", exact=1)
        rest = pp.Word(pp.alphanums+"_")
        identifier = first+pp.Optional(rest)

        digit = pp.Word(pp.nums, exact=1)
        nonzero_digit = pp.Word(pp.nums, exact=1, excludeChars=['0'])
        nonnegative_digits = pp.Or([digit, nonzero_digit+pp.Word(pp.nums)])

        user = channel = identifier
        letterdigit = pp.Word(pp.alphanums, exact=1)

        library = pp.Combine(letterdigit + pp.Word(pp.alphanums+"-"+"_"))
        rc = pp.Combine(pp.Literal('-rc')+'-'+nonnegative_digits)
        alpha = pp.Combine(pp.Literal('-alpha')+'-'+nonnegative_digits)
        beta = pp.Combine(pp.Literal('-beta')+'-'+nonnegative_digits)
        version = pp.Combine(nonnegative_digits+'.'+nonnegative_digits+ pp.Optional('.'+nonnegative_digits)+
                  pp.Optional('.'+nonnegative_digits)+pp.Optional(pp.Or([rc, alpha, beta])))
        user_channel = pp.Or([user+'/'+channel, pp.Literal('PROJECT')])
        package = pp.Combine(library.setResultsName('name')+'/'+version.setResultsName('version')+'@'+ user_channel)
        quote_package = pp.Combine('"'+package+'"')
        return quote_package
Example No. 17
    def get_pos_classifiers(self):
        """
        Generates a dictionary of part-of-speech classifier expressions for pyparsing
        based on the locale vocabulary tables.
        """
        # TODO: in the future, we may need contextual recognition using perturbational
        #       elements for words which can appear in several parts of speech. This may
        #       be necessary in highly analytic languages (e.g. Chinese).
        #
        #       The idea would be to vary the recognition for each word matched, and
        #       try multiple parses on a sentence until a good match is made.
        #
        #       However, in very small vocabularies, collisions are unlikely, and this
        #       is a pretty complicated idea, so I'm not going to try it until it's
        #       needed.

        # Two main problems:
        #   1) Find all the words of a given PoS and create a big "OR" expression
        #   2) For each element of the "OR", tolerate inflections as minimally as possible

        # FIXME: should load using define_classifiers first, then overload.
        self.pos_match = {}
        pos_initial = self.grammar.define_classifiers()
        for i,pos in enumerate(PoS.Syms):
            self.pos_match[pos] = pos_initial[i]
            
        for pos in PoS.Syms:
            vocabulary_glosses = []
            for concept in self.concepts[PoS.Sym2Grp[pos]].values():
                vocabulary_glosses.extend(concept)
            # print vocabulary_glosses
            gloss_expressions = []
            for gloss in vocabulary_glosses:
                gloss_expressions.append(
                    self._adapt_match_expression(
                         self.grammar.define_inflections(pos, gloss)))
                         
            inflected_match = pyparsing.Or(gloss_expressions)

            if not self.pos_match[pos]:
                full_match = inflected_match
            elif not inflected_match:
                full_match = self.pos_match[pos]
            else:
                full_match = self.pos_match[pos] ^ inflected_match

            #print PoS.Sym2Abr[pos], full_match

            full_match.setName(PoS.Sym2Abr[pos].capitalize())

            self.pos_match[pos] = full_match
Example No. 18
def get_grammar():
    punctuation = pp.Word(".,:;()/")

    word_no_angle_bracket = pp.Word(pp.printables, excludeChars="<>")
    html_tag = pp.Combine(
        pp.Optional(punctuation)
        + "<"
        + pp.delimitedList(word_no_angle_bracket, delim=" ", combine=True)
        + ">"
        + pp.Optional(punctuation)
    )

    word_no_backtick = pp.Word(pp.printables, excludeChars="`")
    code = pp.Combine(
        pp.Optional(punctuation)
        + "`"
        + pp.delimitedList(word_no_backtick, delim=" ", combine=True)
        + "`"
        + pp.Optional(punctuation)
    )

    word = pp.Or(word_no_backtick | word_no_angle_bracket)
    words = pp.delimitedList(word, delim=" ", combine=True)

    token_ = pp.Or(html_tag | code | word)

    bullet = pp.Combine("*" + pp.White() + token_)

    heading = pp.Combine(
        pp.OneOrMore("#")
        + pp.OneOrMore(" ")
        + pp.delimitedList(token_, delim=" ", combine=True)
    )

    token = pp.Or(heading | bullet | token_)

    cfg = token[1, ...]
    return cfg
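A brief sketch (assuming `import pyparsing as pp`); the input line is made up:

import pyparsing as pp

grammar = get_grammar()
print(grammar.parseString("# Heading with `code`").asList())
# -> ['# Heading with `code`']  (the heading is combined into a single token)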
Example No. 19
def parse(s):
    code = pp.Forward()
    opcode = pp.Or([
        pp.Literal('+'),
        pp.Literal('-'),
        pp.Literal('*'),
        pp.Literal('/'),
        pp.Literal('_'),
        pp.Literal('='),
        pp.Literal('>'),
        pp.Literal('&'),
        pp.Literal('|'),
        pp.Literal('~'),
        pp.Literal('$'),
        pp.Literal('%'),
        pp.Literal('\\'),
        pp.Literal('@'),
        pp.Literal('ø'),
        pp.Literal('p'),
        pp.Literal(':'),
        pp.Literal(';'),
        pp.Literal('!'),
        pp.Literal('?'),
        pp.Literal('#'),
    ]).setParseAction(lambda toks: ast.Opcode(toks[0]))
    number = (pp.Word('1234567890').setParseAction(
        lambda toks: ast.Number(int(toks[0]))))
    str_def = ((pp.Literal('"') + pp.SkipTo(pp.Literal('"'), include=True)
                ).setParseAction(lambda toks: ast.String(toks[1])))
    varname = (pp.Word(
        'qwertyuiopasdfghjklzxcvbnm',
        exact=1).setParseAction(lambda toks: ast.Varname(toks[0])))
    fn_def = pp.Suppress(pp.Literal('[')) + code + pp.Suppress(pp.Literal(']'))
    expr = pp.Or([opcode, number, varname, str_def, fn_def])
    atom = pp.Or([expr])
    code << pp.ZeroOrMore(atom)
    code.setParseAction(lambda toks: ast.Function(toks))
    return code.parseString(s)[0]
Example No. 20
    def __foreign_key_parser(database):
        def paste_database(unused1, unused2, toc):
            return ['`{database}`.`{table}`'.format(database=database, table=toc[0])]

        return (pp.CaselessLiteral('CONSTRAINT').suppress() +
                pp.QuotedString('`').suppress() +
                pp.CaselessLiteral('FOREIGN KEY').suppress() +
                pp.QuotedString('(', endQuoteChar=')').setResultsName('attributes') +
                pp.CaselessLiteral('REFERENCES') +
                pp.Or([
                    pp.QuotedString('`').setParseAction(paste_database),
                    pp.Combine(pp.QuotedString('`', unquoteResults=False) + '.' +
                               pp.QuotedString('`', unquoteResults=False))]).setResultsName('referenced_table') +
                pp.QuotedString('(', endQuoteChar=')').setResultsName('referenced_attributes'))
Example No. 21
    def _make_matcher_element(self):
        # Handle the case where use_current_match is True.
        if self.use_current_match is True:
            current_match = self.current_match
            if current_match is None:
                result = pyparsing.NoMatch()
            elif current_match == "":
                result = pyparsing.Empty()
            else:
                result = pyparsing.Literal(self.current_match)

            # Set the element's attributes and return it.
            return self._set_matcher_element_attributes(result)

        # Otherwise build a list of next possible literals. Make the required stack
        # of child-parent pairs.
        stack = []
        p1, p2 = self, self.parent
        while p1 and p2:
            stack.append((p1, p2))

            # Move both pivots further up the tree.
            p1 = p1.parent
            p2 = p2.parent

        # Build a list of next literals using the stack.
        next_literals, _ = _collect_next_literals(stack, 0, True, False)

        # De-duplicate the list.
        next_literals = set(next_literals)

        word = pyparsing.Regex(_word_regex_str, re.UNICODE)
        if next_literals:
            # Check if there is a next dictation literal. If there is, only match
            # one word for this expansion.
            if _word_regex_str in next_literals:
                result = word

            # Otherwise build an element to match one or more words stopping on
            # any of the next literals so that they aren't matched as dictation.
            else:
                next_literals = list(map(pyparsing.Literal, next_literals))
                result = pyparsing.OneOrMore(
                    word, stopOn=pyparsing.Or(next_literals))
        else:
            # Handle the case of no literals ahead by allowing one or more Unicode
            # words without restrictions.
            result = pyparsing.OneOrMore(word)

        return self._set_matcher_element_attributes(result)
Example No. 22
def timeentry_expr():
    # fmt:off
    start_time = (
        pp.Word(pp.nums) +
        pp.Optional(":" + pp.Word(pp.nums))).setResultsName("first_time")
    end_time = (
        pp.Word(pp.nums) +
        pp.Optional(":" + pp.Word(pp.nums))).setResultsName("last_time")

    interval_h = pp.Word(pp.nums).setResultsName("hours") + pp.Char("h")
    interval_m = pp.Word(pp.nums).setResultsName("minutes") + pp.Char("m")

    timepart = pp.Or([
        start_time + pp.Char("-") + end_time,
        pp.Or([interval_h, interval_m, interval_h + interval_m])
    ])
    timeseppart = pp.Optional(pp.Char('|:-'))
    tagpart = (pp.Char('[') +
               pp.CharsNotIn('[]').setResultsName("tag", listAllMatches=True) +
               pp.Char(']'))
    descpart = pp.SkipTo(pp.lineEnd).setResultsName("desc")

    return timepart + timeseppart + pp.OneOrMore(tagpart) + descpart
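A short sketch of both time formats (assuming `import pyparsing as pp`); the entries are made up:

import pyparsing as pp

expr = timeentry_expr()
res = expr.parseString("9:00 - 10:30 [meeting] weekly sync with the team")
print(res["desc"].strip())           # weekly sync with the team
print(res["tag"].asList())           # ['meeting']
res = expr.parseString("1h 30m [dev] refactor the parser")
print(res["hours"], res["minutes"])  # 1 30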
Example No. 23
def rhs_value_p():
    Ipv4Address = Combine(Word(nums) +
                          ('.' + Word(nums)) * 3).setResultsName('ipv4')
    Ipv4Address = Ipv4Address.setParseAction(lambda s, l, toks: toks[0])

    Int = Word(nums)
    Int = Int.setParseAction(lambda s, l, toks: int(toks[0]))

    Float = Combine(Word(nums) + '.' + Word(nums)).setResultsName('float')
    Float = Float.setParseAction(lambda s, l, toks: float(toks[0]))

    String = quotedString.copy().addParseAction(pyparsing.removeQuotes)

    rhs = pyparsing.Or([String, Int, Float, Ipv4Address])
    return rhs
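A short sketch showing pyparsing.Or picking the longest alternative here (assuming the module does `from pyparsing import Combine, Word, nums, quotedString` and `import pyparsing`):

import pyparsing
from pyparsing import Combine, Word, nums, quotedString

rhs = rhs_value_p()
print(rhs.parseString("42")[0])           # 42, via Int
print(rhs.parseString("3.14")[0])         # 3.14, via Float (longer match than Int)
print(rhs.parseString("192.168.0.1")[0])  # 192.168.0.1, via Ipv4Address (longest match)
print(rhs.parseString('"hello"')[0])      # hello, via String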
Example No. 24
    def __init__(self, language):
        """Initializes a `Parser` instance.

    Args:
      language: Instance of `Language`. Used to determine the different
          predicate / constant / variable symbols appearing.
    """
        self._language = language
        predicate_symbol = pyparsing.oneOf(language.predicates +
                                           [FALSE_SYMBOL, TRUE_SYMBOL])
        constant_symbol = pyparsing.oneOf(language.constants)
        variable_symbol = pyparsing.oneOf(language.variables)

        left_par = pyparsing.Literal('(').suppress()
        right_par = pyparsing.Literal(')').suppress()

        formula = pyparsing.Forward()

        relation_expressions = self._relation_expressions(
            predicate_symbol, pyparsing.Or([constant_symbol, variable_symbol]))

        formula_without_op = pyparsing.Forward()
        negated_formula_without_op = (pyparsing.Literal('~').suppress() +
                                      formula_without_op)
        negated_formula_without_op.setParseAction(
            lambda args: self._op(args, NEGATION_SYMBOL))
        formula_without_op <<= pyparsing.MatchFirst(
            [left_par + formula + right_par] + relation_expressions +
            [negated_formula_without_op])

        binary_expressions = self._binary_expressions(formula_without_op,
                                                      formula)

        negation = pyparsing.Literal('~').suppress() + formula
        negation.setParseAction(lambda args: self._op(args, NEGATION_SYMBOL))

        for_all = (pyparsing.Literal('all').suppress() + variable_symbol +
                   pyparsing.Literal('.').suppress() + formula)
        for_all.setParseAction(lambda args: self._op(args, FOR_ALL_SYMBOL))

        exists = (pyparsing.Literal('exists').suppress() + variable_symbol +
                  pyparsing.Literal('.').suppress() + formula)
        exists.setParseAction(lambda args: self._op(args, EXISTS_SYMBOL))

        formula <<= pyparsing.MatchFirst(binary_expressions + [negation] +
                                         [for_all, exists, formula_without_op])

        self._expression = formula
Example No. 25
    def _build_precedence(self, precedence_table):
        # C's & dereference operator.
        precedence = []
        for operators, arity, associativity in precedence_table:
            operators = [pyparsing.Literal(x) for x in operators]

            if arity in [_UNARY, _BINARY]:
                operators = pyparsing.Or(operators)

            precedence.append((
                operators,
                arity,
                associativity,
                self._construct_operator(arity),
            ))
        return precedence
Example No. 26
def _setup_parser():
    arg_prefix = pp.Literal('-')
    arg_prefix2 = pp.Literal('--')

    arg_name = pp.Word(pp.alphanums)

    non_shell_chars = pp.alphanums + ':/*%_'
    arg_value = pp.Word(non_shell_chars)

    argument = (
        pp.Or([arg_prefix, arg_prefix2]) + arg_name + pp.ZeroOrMore(arg_value))
    argument.setParseAction(
        lambda tokens: (tokens[0] + tokens[1], tuple(tokens[2:])))

    argument_list = pp.ZeroOrMore(argument)

    return argument_list
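A quick sketch (assuming `import pyparsing as pp`); the command line is made up:

import pyparsing as pp

parsed = _setup_parser().parseString("--input /tmp/data -v")
print(parsed.asList())
# each argument is reduced by the parse action to a (name, values) tuple,
# e.g. [('--input', ('/tmp/data',)), ('-v', ())]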
Example No. 27
    def __init__(self):
        element = pyparsing.oneOf(ELEMENTS)
        coeff = pyparsing.Word(pyparsing.nums)
        optional_coeff = pyparsing.Optional(coeff)

        element_and_count = pyparsing.Forward()
        element_and_count << (element + optional_coeff)
        element_and_count.setParseAction(self.HandleElementAndCount)

        n_block = pyparsing.Forward()
        n_block << ('(' + pyparsing.OneOrMore(element_and_count) + ')n')
        n_block.setParseAction(self.HandleNBlock)

        element_or_block = pyparsing.Or([element_and_count, n_block])
        self.formula_parser = pyparsing.OneOrMore(element_or_block)

        self.formula_re = re.compile(r'\(?([A-Z][a-z]?)([0-9]*)(\)n)?')
Example No. 28
    def as_pp_parser(self) -> pp.ParserElement:
        pos_args = [i.pp_parser for i in self.pos_args] + [rest_parser.copy()]
        opt_args = [i.pp_parser for i in self.opt_args]
        if opt_args:
            optionals = pp.Or(opt_args)
            args = intersperse_parser(pos_args, optionals)
        else:
            args = pp.And(pos_args)
        args.setParseAction(update_dict)
        cw = pp.CaselessKeyword(
            self.command_word)("command").setParseAction(lambda x: self.func)

        if self.add_help:
            args = (self.help_arg.pp_parser
                    | args).setParseAction(lambda x: x[0])

        return (cw + args("options")).streamline()
Example No. 29
def get_internal_link(namespaces, flag=False):
    """Get internal link parser element.

    internal_link =
    "[[", [ [ namespace ], ":" ], ( anchor | page_name, [ anchor ] ),
    [ "|", [ link_text ] ], "]]", [ word_ending ];

    :param list namespaces: namespaces
    :param bool flag: toggle debug messages on/off

    :returns: internal link
    :rtype: ParserElement
    """
    try:
        internal_link_opening = pyparsing.Literal("[[")
        namespace = _get_namespace(namespaces, flag=flag)
        colon = pyparsing.Literal(":")
        page_name = _get_page_name(flag=flag)
        anchor = _get_anchor(flag=flag)
        pipe = pyparsing.Literal("|")
        link_text = _get_link_text(flag=flag)
        internal_link_closing = pyparsing.Literal("]]")
        word_ending = _get_word_ending(flag=flag)
        internal_link = pyparsing.Combine(
            internal_link_opening
            + pyparsing.Optional(pyparsing.Optional(namespace) + colon)
            + pyparsing.Or(
                (
                    anchor,
                    pyparsing.Combine(page_name + pyparsing.Optional(anchor))
                )
            )
            + pyparsing.Optional(pipe + pyparsing.Optional(link_text))
            + internal_link_closing
            + pyparsing.Optional(word_ending)
        )
        internal_link.leaveWhitespace()
        internal_link.parseWithTabs()
        if flag:
            internal_link.setDebug()
        internal_link.setName("internal_link")
        internal_link = internal_link.setResultsName("internal_link")
    except Exception as exception:
        msg = "failed to return internal link:{}".format(exception)
        raise RuntimeError(msg)
    return internal_link
Example No. 30
class Record(Node):
    """
    Single standalone entry of `pactl list`.

    The record is composed of a name and a list of attributes.  Pulseaudio
    exposes objects such as cards, sinks and sources as separate records.

    Each attribute may be of a different type. Some attributes are simple
    values while others have finer structure, including lists and even
    additional recursive attributes.
    """

    __fragments__ = {
        'name': 'record-name',
        'attribute_list': lambda t: t['record-attributes'].asList(),
        'attribute_map': lambda t: OrderedDict(
            (attr.name, attr)
            for attr in t['record-attributes'].asList()),
    }

    __syntax__ = (
        p.LineStart()
        + p.NotAny(p.White(' \t'))
        + p.Regex("[A-Z][a-zA-Z ]+ #[0-9]+").setResultsName("record-name")
        + p.LineEnd().suppress()
        + p.OneOrMore(
            p.Or([
                GenericListAttribute.Syntax,
                GenericSimpleAttribute.Syntax,
            ])
        ).setResultsName("record-attributes")
    ).setResultsName("record")

    def as_json(self):
        return {
            'name': self.name,
            'attribute_list': self.attribute_list,
        }

    def __repr__(self):
        # Custom __repr__ that skips attribute_map
        return "{}({})".format(
            type(self).__name__, ", ".join([
                "{}={!r}".format(attr, getattr(self, attr))
                for attr in ['name', 'attribute_list']]))