Example 1
aop1 = oneOf('+ -')
aop2 = oneOf('%').setParseAction(lambda s, l, t: ['mod'])

bop = oneOf('& |').setParseAction(lambda s, l, t: [t[0] + t[0]])
NOT = Literal('!')

rop = oneOf('< > <= >= = !=')

GET, CAT, HAS, IND, LEN, REP, SUB, EQL = map(
    Literal, '#get #cat #has #ind #len #rep #sub #eql'.split())

var = Word(alphas + '_:$', alphanums + '_:$').setParseAction(addVar)
ival = Combine(Optional('-') + Word(nums)).setParseAction(addConst)
ivar = (ival + var).setParseAction(lambda s, l, t: [t[0], '*', t[1]])

term = ivar | ival | var | QuotedString(
    quoteChar='"', unquoteResults=False).setParseAction(addConst)

stmt = Forward()
expr = Forward()
sexpr = Forward()

sexpr << (
    (GET + LPAR + expr + COMMA + expr + RPAR).setParseAction(
        lambda s, l, t:
        [['Char.escaped', ['String.get', chkString(t[1]), t[2]]]])
    |
    (CAT + LPAR + expr + COMMA + expr + RPAR
     ).setParseAction(lambda s, l, t: [[chkString(t[
         1]), '^', chkString(t[2])]])
    |
    (IND + LPAR + expr + COMMA + expr + RPAR).setParseAction(lambda s, l, t: [[
Example 2
def _create_grammar():
    """Create the DBC grammar.

    """

    word = Word(printables.replace(';', '').replace(':', ''))
    integer = Group(Optional('-') + Word(nums))
    positive_integer = Word(nums).setName('positive integer')
    number = Word(nums + '.Ee-+')
    colon = Suppress(Literal(':'))
    scolon = Suppress(Literal(';'))
    pipe = Suppress(Literal('|'))
    at = Suppress(Literal('@'))
    sign = Literal('+') | Literal('-')
    lp = Suppress(Literal('('))
    rp = Suppress(Literal(')'))
    lb = Suppress(Literal('['))
    rb = Suppress(Literal(']'))
    comma = Suppress(Literal(','))
    node = Word(alphas + nums + '_-').setWhitespaceChars(' ')
    frame_id = Word(nums).setName('frame id')

    version = Group(Keyword('VERSION') - QuotedString('"', multiline=True))
    version.setName(VERSION)

    symbol = Word(alphas + '_') + Suppress(LineEnd())

    symbols = Group(Keyword('NS_') - colon - Group(ZeroOrMore(symbol)))
    symbols.setName('NS_')

    discard = Suppress(Keyword('BS_') - colon).setName('BS_')

    nodes = Group(Keyword('BU_') - colon - Group(ZeroOrMore(node)))
    nodes.setName('BU_')

    signal = Group(
        Keyword(SIGNAL) - Group(word + Optional(word)) - colon -
        Group(positive_integer - pipe - positive_integer - at -
              positive_integer - sign) -
        Group(lp - number - comma - number - rp) -
        Group(lb - number - pipe - number - rb) -
        QuotedString('"', multiline=True) - Group(delimitedList(node)))
    signal.setName(SIGNAL)

    message = Group(
        Keyword(MESSAGE) - frame_id - word - colon - positive_integer - word -
        Group(ZeroOrMore(signal)))
    message.setName(MESSAGE)

    event = Suppress(
        Keyword(EVENT) - word - colon - positive_integer - lb - number - pipe -
        number - rb - QuotedString('"', multiline=True) - number - number -
        word - node - scolon)
    event.setName(EVENT)

    comment = Group(
        Keyword(COMMENT) -
        ((Keyword(MESSAGE) - frame_id - QuotedString('"', multiline=True) -
          scolon).setName(MESSAGE)
         | (Keyword(SIGNAL) - frame_id - word -
            QuotedString('"', multiline=True) - scolon).setName(SIGNAL)
         | (Keyword(NODES) - word - QuotedString('"', multiline=True) -
            scolon).setName(NODES)
         | (Keyword(EVENT) - word - QuotedString('"', multiline=True) -
            scolon).setName(EVENT)
         |
         (QuotedString('"', multiline=True) - scolon).setName('QuotedString')))
    comment.setName(COMMENT)

    attribute_definition = Group(
        Keyword(ATTRIBUTE_DEFINITION) -
        ((QuotedString('"', multiline=True))
         | (Keyword(SIGNAL)
            | Keyword(MESSAGE)
            | Keyword(EVENT)
            | Keyword(NODES)) + QuotedString('"', multiline=True)) - word -
        (scolon
         | (Group(
             ZeroOrMore(
                 Group((comma | Empty()) +
                       QuotedString('"', multiline=True)))) + scolon)
         | (Group(ZeroOrMore(number)) + scolon)))
    attribute_definition.setName(ATTRIBUTE_DEFINITION)

    attribute_definition_default = Group(
        Keyword(ATTRIBUTE_DEFINITION_DEFAULT) -
        QuotedString('"', multiline=True) -
        (number | QuotedString('"', multiline=True)) - scolon)
    attribute_definition_default.setName(ATTRIBUTE_DEFINITION_DEFAULT)

    attribute = Group(
        Keyword(ATTRIBUTE) - QuotedString('"', multiline=True) - Group(
            Optional((Keyword(MESSAGE) + frame_id)
                     | (Keyword(SIGNAL) + frame_id + word)
                     | (Keyword(NODES) + word))) -
        (QuotedString('"', multiline=True) | number) - scolon)
    attribute.setName(ATTRIBUTE)

    choice = Group(
        Keyword(CHOICE) - Group(Optional(frame_id)) - word -
        Group(OneOrMore(Group(integer + QuotedString('"', multiline=True)))) -
        scolon)
    choice.setName(CHOICE)

    value_table = Group(
        Keyword(VALUE_TABLE) - word -
        Group(OneOrMore(Group(integer + QuotedString('"', multiline=True)))) -
        scolon)
    value_table.setName(VALUE_TABLE)

    signal_type = Group(
        Keyword(SIGNAL_TYPE) - frame_id - word - colon - positive_integer -
        scolon)
    signal_type.setName(SIGNAL_TYPE)

    signal_multiplexer_values = Group(
        Keyword(SIGNAL_MULTIPLEXER_VALUES) - frame_id - word - word - Group(
            delimitedList(positive_integer - Suppress('-') -
                          Suppress(positive_integer))) - scolon)
    signal_multiplexer_values.setName(SIGNAL_MULTIPLEXER_VALUES)

    message_add_sender = Group(
        Keyword(MESSAGE_TX_NODE) - frame_id - colon -
        Group(delimitedList(node)) - scolon)
    message_add_sender.setName(MESSAGE_TX_NODE)

    attribute_definition_rel = Group(
        Keyword(ATTRIBUTE_DEFINITION_REL) -
        (QuotedString('"', multiline=True)
         | (Keyword(NODES_REL) + QuotedString('"', multiline=True))) - word -
        (scolon
         | (Group(
             ZeroOrMore(
                 Group((comma | Empty()) +
                       QuotedString('"', multiline=True)))) + scolon)
         | (Group(ZeroOrMore(number)) + scolon)))
    attribute_definition_rel.setName(ATTRIBUTE_DEFINITION_REL)

    attribute_definition_default_rel = Group(
        Keyword(ATTRIBUTE_DEFINITION_DEFAULT_REL) -
        QuotedString('"', multiline=True) -
        (number | QuotedString('"', multiline=True)) - scolon)
    attribute_definition_default_rel.setName(ATTRIBUTE_DEFINITION_DEFAULT_REL)

    attribute_rel = Group(
        Keyword(ATTRIBUTE_REL) - QuotedString('"', multiline=True) -
        Keyword(NODES_REL) - word - Keyword(SIGNAL) - frame_id - word -
        (positive_integer | QuotedString('"')) - scolon)
    attribute_rel.setName(ATTRIBUTE_REL)

    sig_group = Group(
        Keyword(SIGNAL_GROUP) - frame_id - word - integer - colon -
        OneOrMore(word) - scolon)
    sig_group.setName(SIGNAL_GROUP)

    entry = (version
             | symbols
             | discard
             | nodes
             | message
             | comment
             | attribute_definition
             | attribute_definition_default
             | attribute
             | choice
             | value_table
             | signal_type
             | signal_multiplexer_values
             | message_add_sender
             | attribute_definition_rel
             | attribute_definition_default_rel
             | attribute_rel
             | sig_group
             | event)

    frame_id.setParseAction(lambda _s, _l, t: int(t[0]))

    return OneOrMore(entry) + StringEnd()
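
# A minimal, self-contained check of the VERSION rule above (a hedged sketch;
# the full DBC grammar is what _create_grammar() returns):
from pyparsing import Group, Keyword, QuotedString

version = Group(Keyword('VERSION') - QuotedString('"', multiline=True))
print(version.parseString('VERSION "1.0"').asList())  # [['VERSION', '1.0']]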
Example 3
    def parse_query_string(self, query_string):
        # pylint: disable=too-many-locals
        """
        Function that parse the querystring, extracting infos for limit, offset,
        ordering, filters, attribute and extra projections.
        :param query_string (as obtained from request.query_string)
        :return: parsed values for the querykeys
        """

        from pyparsing import Word, alphas, nums, alphanums, printables, \
            ZeroOrMore, OneOrMore, Suppress, Optional, Literal, Group, \
            QuotedString, Combine, \
            StringStart as SS, StringEnd as SE, \
            WordEnd as WE, \
            ParseException

        from pyparsing import pyparsing_common as ppc
        from dateutil import parser as dtparser
        from psycopg2.tz import FixedOffsetTimezone

        ## Define grammar
        # key types
        key = Word(alphas + '_', alphanums + '_')
        # operators
        operator = (
            Literal('=like=') | Literal('=ilike=') | Literal('=in=') | Literal('=notin=') | Literal('=') |
            Literal('!=') | Literal('>=') | Literal('>') | Literal('<=') | Literal('<')
        )
        # Value types
        value_num = ppc.number
        value_bool = (Literal('true') | Literal('false')).addParseAction(lambda toks: bool(toks[0]))
        value_string = QuotedString('"', escQuote='""')
        value_orderby = Combine(Optional(Word('+-', exact=1)) + key)

        ## DateTimeShift value. First compose the atomic values, then combine
        # them and convert them to datetime objects
        # Date
        value_date = Combine(
            Word(nums, exact=4) + Literal('-') + Word(nums, exact=2) + Literal('-') + Word(nums, exact=2)
        )
        # Time
        value_time = Combine(
            Literal('T') + Word(nums, exact=2) + Optional(Literal(':') + Word(nums, exact=2)) +
            Optional(Literal(':') + Word(nums, exact=2))
        )
        # Shift
        value_shift = Combine(Word('+-', exact=1) + Word(nums, exact=2) + Optional(Literal(':') + Word(nums, exact=2)))
        # Combine atomic values
        value_datetime = Combine(
            value_date + Optional(value_time) + Optional(value_shift) + WE(printables.replace('&', ''))
            # For our purposes the value must end with '&' or with the end of
            # the string. Adding WordEnd only here is very important: it makes
            # the atomic values for date, time and shift not really usable on
            # their own.
        )

        ########################################################################

        def validate_time(toks):
            """
            Convert a datetime string into a datetime object. The signature is
            compliant with pyparsing parse-action requirements.

            :param toks: datetime string passed in tokens
            :return: datetime object
            """

            datetime_string = toks[0]

            # Check the precision
            precision = len(datetime_string.replace('T', ':').split(':'))

            # Parse
            try:
                dtobj = dtparser.parse(datetime_string)
            except ValueError:
                raise RestInputValidationError(
                    'time value has wrong format. The right format is '
                    '<date>T<time><offset>, where <date> is expressed as '
                    '[YYYY]-[MM]-[DD], <time> is expressed as [HH]:[MM]:[SS], '
                    '<offset> is expressed as +/-[HH]:[MM] given with respect to UTC'
                )
            if dtobj.tzinfo is not None and dtobj.utcoffset() is not None:
                tzoffset_minutes = int(dtobj.utcoffset().total_seconds() // 60)
                return DatetimePrecision(
                    dtobj.replace(tzinfo=FixedOffsetTimezone(offset=tzoffset_minutes, name=None)), precision
                )

            return DatetimePrecision(dtobj.replace(tzinfo=FixedOffsetTimezone(offset=0, name=None)), precision)

        ########################################################################

        # Convert datetime value to datetime object
        value_datetime.setParseAction(validate_time)

        # More General types
        value = (value_string | value_bool | value_datetime | value_num | value_orderby)
        # List of values (I do not check the homogeneity of the types of values,
        # query builder will do it somehow)
        value_list = Group(value + OneOrMore(Suppress(',') + value) + Optional(Suppress(',')))

        # Fields
        single_field = Group(key + operator + value)
        list_field = Group(key + (Literal('=in=') | Literal('=notin=')) + value_list)
        orderby_field = Group(key + Literal('=') + value_list)
        field = (list_field | orderby_field | single_field)

        # Fields separator
        separator = Suppress(Literal('&'))

        # General query string
        general_grammar = SS() + Optional(field) + ZeroOrMore(
            separator + field) + \
                          Optional(separator) + SE()

        ## Parse the query string
        try:
            fields = general_grammar.parseString(query_string)

            # jQuery adds an '_=timestamp' parameter to avoid cached data/responses.
            # To handle the query, remove this '_' parameter from the query string.
            # For more details check issue #789
            # (https://github.com/aiidateam/aiida-core/issues/789) in aiida-core
            field_list = [entry for entry in fields.asList() if entry[0] != '_']

        except ParseException as err:
            raise RestInputValidationError(
                'The query string format is invalid. Parser returned this '
                'message: "{}." Please notice that the column number is '
                'counted from the first character of the query string.'.format(err)
            )

        ## return the translator instructions elaborated from the field_list
        return self.build_translator_parameters(field_list)
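
# Hedged, self-contained sketch of the field grammar above on a tiny query
# string (simplified: no dates, lists or '=in='-style operators):
from pyparsing import (Combine, Group, Literal, Optional, StringEnd, StringStart,
                       Suppress, Word, ZeroOrMore, alphanums, alphas,
                       pyparsing_common as ppc)

key = Word(alphas + '_', alphanums + '_')
value = ppc.number | Combine(Optional(Word('+-', exact=1)) + key)
field = Group(key + Literal('=') + value)
grammar = StringStart() + Optional(field) + ZeroOrMore(Suppress('&') + field) + StringEnd()
print(grammar.parseString('limit=10&orderby=+ctime').asList())
# [['limit', '=', 10], ['orderby', '=', '+ctime']]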
Example 4

word_characters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz_0123456789'
expr = Forward()

# define the parser
integer = Word(nums)
real = Combine(Word(nums) + "." + Word(nums))

constant = oneOf('True False None yes no') + WordEnd(word_characters)

variable = Regex(r'([a-zA-Z0-9\._]+)')
explicit_variable = '$' + Regex(r'([a-zA-Z0-9\._]+)')
current_scope = Literal('$$')

string = (QuotedString("'''", escChar=None, unquoteResults=True)
          | QuotedString('"""', escChar=None, unquoteResults=True)
          | QuotedString('"', escChar="\\", unquoteResults=True)
          | QuotedString('\'', escChar="\\", unquoteResults=True))
regexp = QuotedString('/', escChar=None)
timespan = Combine(Word(nums) + oneOf('ms s m h d'))

current_scope_operand = current_scope
variable_operand = variable
explicit_variable_operand = explicit_variable
integer_operand = integer
real_operand = real
number_operand = real | integer
string_operand = string

groupop = Literal(',')
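
# Minimal usage sketch for the operands above (assuming the pyparsing names
# imported in the omitted header of this fragment):
print(timespan.parseString('500ms').asList())        # ['500ms']
print(string.parseString('"hello"').asList())        # ['hello']
print(number_operand.parseString('3.14').asList())   # ['3.14']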
Example 5
def _create_grammar_6_0():
    """Create the SYM 6.0 grammar.

    """

    word = Word(printables.replace(';', '').replace(':', ''))
    positive_integer = Word(nums)
    number = Word(nums + '.Ee-+')
    lp = Suppress(Literal('('))
    rp = Suppress(Literal(')'))
    lb = Suppress(Literal('['))
    rb = Suppress(Literal(']'))
    name = Word(alphas + nums + '_-').setWhitespaceChars(' ')
    assign = Suppress(Literal('='))
    comma = Suppress(Literal(','))
    type_ = name

    version = Group(Keyword('FormatVersion')
                    - assign
                    - Keyword('6.0'))

    title = Group(Keyword('Title')
                  - assign
                  - QuotedString('"'))

    enum_value = Group(number
                       + assign
                       + QuotedString('"'))

    enum = Group(Suppress(Keyword('Enum'))
                 - assign
                 - name
                 - Suppress(lp)
                 + Group(delimitedList(enum_value))
                 - Suppress(rp))

    sig_unit = Group(Literal('/u:') + word)
    sig_factor = Group(Literal('/f:') + word)
    sig_offset = Group(Literal('/o:') + word)
    sig_min = Group(Literal('/min:') + word)
    sig_max = Group(Literal('/max:') + word)
    sig_default = Group(Literal('/d:') + word)
    sig_long_name = Group(Literal('/ln:') + word)
    sig_enum = Group(Literal('/e:') + word)

    signal = Group(Suppress(Keyword('Sig'))
                   - Suppress(assign)
                   - name
                   - type_
                   + Group(Optional(positive_integer))
                   + Group(Optional(Keyword('-m')))
                   + Group(Optional(sig_unit)
                           + Optional(sig_factor)
                           + Optional(sig_offset)
                           + Optional(sig_min)
                           + Optional(sig_max)
                           + Optional(sig_default)
                           + Optional(sig_long_name)
                           + Optional(sig_enum)))

    symbol = Group(Suppress(lb)
                   - name
                   - Suppress(rb)
                   - Group(Optional(Keyword('ID')
                                    + assign
                                    + word))
                   - Group(Keyword('Len')
                           + assign
                           + positive_integer)
                   + Group(Optional(Keyword('Mux')
                                    + assign
                                    + word
                                    + positive_integer
                                    + comma
                                    + positive_integer
                                    + positive_integer))
                   + Group(Optional(Keyword('CycleTime')
                                    + assign
                                    + positive_integer))
                   + Group(Optional(Keyword('Timeout')
                                    + assign
                                    + positive_integer))
                   + Group(Optional(Keyword('MinInterval')
                                    + assign
                                    + positive_integer))
                   + Group(ZeroOrMore(Group(Keyword('Sig')
                                            + assign
                                            + name
                                            + positive_integer))))

    enums = Group(Keyword('{ENUMS}')
                  + Group(ZeroOrMore(enum)))
    signals = Group(Keyword('{SIGNALS}')
                    + Group(ZeroOrMore(signal)))
    send = Group(Keyword('{SEND}')
                 + Group(ZeroOrMore(symbol)))
    receive = Group(Keyword('{RECEIVE}')
                    + Group(ZeroOrMore(symbol)))
    sendreceive = Group(Keyword('{SENDRECEIVE}')
                        + Group(ZeroOrMore(symbol)))

    section = (enums
               | signals
               | send
               | receive
               | sendreceive)

    grammar = (version
               - title
               + Group(OneOrMore(section))
               + StringEnd())
    grammar.ignore(dblSlashComment)

    return grammar
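
# Self-contained sketch of the Keyword/assign/QuotedString pattern used above
# (a minimal illustration, not the full SYM 6.0 grammar):
from pyparsing import Group, Keyword, Literal, QuotedString, Suppress

assign = Suppress(Literal('='))
title = Group(Keyword('Title') - assign - QuotedString('"'))
print(title.parseString('Title="Engine bus"').asList())  # [['Title', 'Engine bus']]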
Example 6
        return '%s/%d' % (g.functor, g.arity)

    def listing(self):
        for key in self.index:
            print('% rules for', key)
            for r in self.index[key]:
                print(r)


##############################################################################
## the parser
##############################################################################

from pyparsing import Word, CharsNotIn, alphas, alphanums, delimitedList, nestedExpr, Optional, Group, QuotedString

atomNT = Word(alphanums + "_" + "#" + "/" + "@") | QuotedString(quoteChar="'",
                                                                escChar="\\")
goalNT = atomNT + Optional("(" + delimitedList(atomNT) + ")")
goalListNT = Optional(delimitedList(Group(goalNT)))
featureFindAllNT = Optional(":" + delimitedList(Group(goalNT)))
featureTemplateNT = delimitedList(Group(goalNT))
featureBlockNT = Optional("{" + featureTemplateNT('ftemplate') +
                          featureFindAllNT('ffindall') + "}")
ruleNT = goalNT("lhs") + ":-" + goalListNT("rhs") + featureBlockNT(
    "features") + "."


class Parser(object):
    @staticmethod
    def _convertGoal(ptree):
        return Goal(ptree[0], ptree[2:-1])
Example 7
binaryOp = oneOf("== != < > >= <= in notin", caseless=True)('operator')

E = CaselessLiteral("E")
numberSign = Word("+-", exact=1)
realNumber = Combine(
    Optional(numberSign) + (Word(nums) + "." + Optional(Word(nums))
                            | ("." + Word(nums))) +
    Optional(E + Optional(numberSign) + Word(nums)))

integer = Combine(
    Optional(numberSign) + Word(nums) +
    Optional(E + Optional("+") + Word(nums)))

# str_ = quotedString.addParseAction(removeQuotes)
str_ = QuotedString('"') | QuotedString("'")
bool_ = oneOf('true false', caseless=True)

simpleVals = (realNumber.setParseAction(lambda toks: float(toks[0]))
              | integer.setParseAction(lambda toks: int(toks[0]))
              | str_
              | bool_.setParseAction(lambda toks: toks[0] == 'true')
              | propertyPath.setParseAction(lambda toks: SubstituteVal(toks))
              )  # need to add support for alg expressions

propertyVal = (simpleVals
               | (lparen + Group(delimitedList(simpleVals)) + rparen))

boolExpression = Forward()
boolCondition = Group((Group(propertyVal)('lval') + binaryOp +
                       Group(propertyVal)('rval'))
Example 8
# Search operator
OPERATOR = oneOf(OPERATOR_MAP.keys())

# Field name; explicitly exclude URL-like patterns
FIELD = Regex(r"""(?!http|ftp|https|mailto)[a-zA-Z_]+""")

# Match token
WORD = Regex(r"""[^ \(\)]([^ '"]*[^ '"\)])?""")
DATE = Word("0123456789:.-T")

# Date range
RANGE = "[" + DATE + "to" + DATE + "]"

# Match value
REGEX_STRING = "r" + QuotedString('"', escChar="\\")
STRING = (
    REGEX_STRING
    | QuotedString("'", escChar="\\")
    | QuotedString('"', escChar="\\")
    | WORD
)

# Single term, either field specific or not
TERM = (FIELD + OPERATOR + (RANGE | STRING)) | STRING

# Multi term with or without operator
QUERY = Optional(
    infixNotation(
        TERM,
        [
Example 9
    def parse_selector(cls, selector):
        """
        Parse a selector and return list of tokens

        Selector formats:
            ALL (or blank)              match any entity
            <val1> == <val2>            values are same
            <val1> in <val2>            second value is list containing 1st value, 
                                        or values are same, or val1 is None.

            <val1> and <val2> may be:

            [<field-id>]                refers to field in entity under test
            <name>[<field-id>]          refers to field of context value, or None if the
                                        indicated context value or field is not defined.
            "<string>"                  literal string value.  Quotes within are escaped.

        <field_id> values are URIs or CURIEs, using characters defined by RFC3986,
        except "[" and "]"
        
        RFC3986:
           unreserved    = ALPHA / DIGIT / "-" / "." / "_" / "~"
           reserved      = gen-delims / sub-delims
           gen-delims    = ":" / "/" / "?" / "#" / "[" / "]" / "@"
           sub-delims    = "!" / "$" / "&" / "'" / "(" / ")"
                         / "*" / "+" / "," / ";" / "="
        """
        def get_value(val_list):
            if len(val_list) == 1:
                return {
                    'type': 'literal',
                    'name': None,
                    'field_id': None,
                    'value': val_list[0]
                }
            elif val_list[0] == '[':
                return {
                    'type': 'entity',
                    'name': None,
                    'field_id': val_list[1],
                    'value': None
                }
            elif val_list[1] == '[':
                return {
                    'type': 'context',
                    'name': val_list[0],
                    'field_id': val_list[2],
                    'value': None
                }
            else:
                return {
                    'type': 'unknown',
                    'name': None,
                    'field_id': None,
                    'value': None
                }

        p_name = Word(alphas + "_", alphanums + "_")
        p_id = Word(alphas + "_@", alphanums + "_-.~:/?#@!$&'()*+,;=)")
        p_val = (Group(Literal("[") + p_id + Literal("]"))
                 | Group(p_name + Literal("[") + p_id + Literal("]"))
                 | Group(QuotedString('"', "\\"))
                 | Group(QuotedString("'", "\\"))
                 | Group(p_id))
        p_comp = (Literal("==") | Literal("in"))
        p_selector = (p_val + p_comp + p_val + StringEnd())
        try:
            resultlist = p_selector.parseString(selector).asList()
        except ParseException:
            return None
        resultdict = {}
        if resultlist:
            resultdict['val1'] = get_value(resultlist[0])
            resultdict['comp'] = resultlist[1]
            resultdict['val2'] = get_value(resultlist[2])
        return resultdict
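
    # Hedged usage sketch (illustrative input): parse_selector('[rdfs:label] == "Example"')
    # returns {'val1': {'type': 'entity', 'field_id': 'rdfs:label', ...},
    #          'comp': '==',
    #          'val2': {'type': 'literal', 'value': 'Example', ...}}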
Example 10
and_, or_, not_, to_ = map(CaselessKeyword, 'AND OR NOT TO'.split())
keyword = and_ | or_ | not_ | to_

query_expr = Forward()

required_modifier = Literal('+')('required')
prohibit_modifier = Literal('-')('prohibit')
special_characters = '=><(){}[]^"~*?:\\/'
valid_word = Word(printables, excludeChars=special_characters).setName('word')
valid_word.setParseAction(lambda t: t[0].replace('\\\\', chr(127)).replace(
    '\\', '').replace(chr(127), '\\'))

clause = Forward()
field_name = valid_word()('fieldname')
single_term = valid_word()('singleterm')
phrase = QuotedString('"', unquoteResults=True)('phrase')
wildcard = Regex('[a-z0-9]*[\?\*][a-z0-9]*')('wildcard')
wildcard.setParseAction(lambda t: t[0].replace('?', '.?').replace('*', '.*'))
regex = QuotedString('/', unquoteResults=True)('regex')

_all = Literal('*')
lower_range = Group((LBRACK('inclusive') | LBRACE('exclusive')) +
                    (valid_word | _all)('lowerbound'))
upper_range = Group((valid_word | _all)('upperbound') +
                    (RBRACK('inclusive') | RBRACE('exclusive')))
_range = (lower_range + to_ + upper_range)('range')

GT = Literal('>')
GTE = Literal('>=')
LT = Literal('<')
LTE = Literal('<=')
Example 11
    def _string_to_ast(self, input_string):
        """ Parse a smart search string and return it in an AST like form
        """

        # simple words
        # we need to use a regex to match on words because the regular
        # Word(alphanums) will only match on American ASCII alphanums and since
        # we try to be Unicode / internationally friendly we need to match much
        # much more. Trying to expand a word class to catch it all seems futile
        # so we match on everything *except* a few things, like our operators
        comp_word = Regex("[^*\s=><~!]+")
        word = Regex("[^*\s=><~!]+").setResultsName('word')
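        # e.g. (illustration): unlike Word(alphanums), this also matches a word
        # such as 'naïve' as a single token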
        # numbers
        comp_number = Word(nums)
        number = Word(nums).setResultsName('number')

        # IPv4 address
        ipv4_oct = Regex("((2(5[0-5]|[0-4][0-9])|[01]?[0-9][0-9]?))")
        comp_ipv4_address = Combine(ipv4_oct + ('.' + ipv4_oct * 3))
        ipv4_address = Combine(ipv4_oct +
                               ('.' +
                                ipv4_oct * 3)).setResultsName('ipv4_address')

        # IPv6 address
        ipv6_address = Regex(
            "((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:)))(%.+)?"
        ).setResultsName('ipv6_address')
        ipv6_prefix = Combine(ipv6_address +
                              Regex("/(12[0-8]|1[01][0-9]|[0-9][0-9]?)")
                              ).setResultsName('ipv6_prefix')

        # VRF RTs of the form number:number
        vrf_rt = Combine((comp_ipv4_address | comp_number) + Literal(':') +
                         comp_number).setResultsName('vrf_rt')

        # tags
        tags = Combine(Literal('#') + comp_word).setResultsName('tag')

        # operators for matching
        match_op = oneOf(' '.join(
            self.match_operators)).setResultsName('operator')
        boolean_op = oneOf(' '.join(
            self.boolean_operators)).setResultsName('boolean')
        # quoted string
        d_quoted_string = QuotedString('"', unquoteResults=True, escChar='\\')
        s_quoted_string = QuotedString('\'', unquoteResults=True, escChar='\\')
        quoted_string = (s_quoted_string
                         | d_quoted_string).setResultsName('quoted_string')
        # expression to match a certain value for an attribute
        expression = Group(word + match_op +
                           (quoted_string | vrf_rt | word
                            | number)).setResultsName('expression')
        # we work on atoms, which are single quoted strings, match expressions,
        # tags, VRF RT or simple words.
        # NOTE: Place them in order of most exact match first!
        atom = Group(ipv6_prefix | ipv6_address | quoted_string | expression
                     | tags | vrf_rt | boolean_op | word)

        enclosed = Forward()
        parens = nestedExpr('(', ')', content=enclosed)
        enclosed << (parens | atom).setResultsName('nested')

        content = Forward()
        content << (ZeroOrMore(enclosed))

        res = content.parseString(input_string)
        return res
Example 12
def _create_primitives():
    global binary, ident, rvalue, number, quoted_string, semi, tick_interval, time_interval, slot_id, comp, config_type, stream, comment, stream_trigger, selector

    if ident is not None:
        return

    semi = Literal(u';').suppress()
    ident = Word(alphas + u"_", alphas + nums + u"_")
    number = Regex(u'((0x[a-fA-F0-9]+)|[+-]?[0-9]+)').setParseAction(
        lambda s, l, t: [int(t[0], 0)])
    binary = Regex(u'hex:([a-fA-F0-9][a-fA-F0-9])+').setParseAction(
        lambda s, l, t: [unhexlify(t[0][4:])])
    quoted_string = QuotedString(quoteChar='"',
                                 escChar="\\",
                                 unquoteResults=True)

    comment = Literal('#') + restOfLine

    rvalue = number | quoted_string

    # Convert all time intervals into an integer number of seconds
    time_unit_multipliers = {
        u'second': 1,
        u'seconds': 1,
        u'minute': 60,
        u'minutes': 60,
        u'hour': 60 * 60,
        u'hours': 60 * 60,
        u'day': 60 * 60 * 24,
        u'days': 60 * 60 * 24,
        u'month': 60 * 60 * 24 * 30,
        u'months': 60 * 60 * 24 * 30,
        u'year': 60 * 60 * 24 * 365,
        u'years': 60 * 60 * 24 * 365,
    }

    config_type = oneOf(
        'uint8_t uint16_t uint32_t int8_t int16_t int32_t uint8_t[] uint16_t[] uint32_t[] int8_t[] int16_t[] int32_t[] string binary'
    )
    comp = oneOf('> < >= <= == ~=')

    # Time intervals are all based on internal system clocks so we include a 'system' tag
    time_unit = oneOf(
        u"second seconds minute minutes hour hours day days week weeks month months year years"
    )
    time_interval = (number + time_unit).setParseAction(
        lambda s, l, t: [t[0] * time_unit_multipliers[t[1]], 'system'])
    tick_interval = (number + (Literal("tick_1") | Literal("tick_2"))
                     ).setParseAction(lambda s, l, t: [t[0], t[1]])
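    # e.g. (illustration): '10 minutes' -> [600, 'system'], '2 tick_1' -> [2, 'tick_1']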

    slot_id = Literal(u"controller") | (Literal(u'slot') + number)
    slot_id.setParseAction(
        lambda s, l, t:
        [SlotIdentifier.FromString(u' '.join([str(x) for x in t]))])

    stream_modifier = Literal("system") | Literal("user") | Literal("combined")

    stream = Optional(Literal(
        "system")) + oneOf("buffered unbuffered input output counter constant"
                           ) + number + Optional(Literal("node"))
    stream.setParseAction(
        lambda s, l, t:
        [DataStream.FromString(u' '.join([str(x) for x in t]))])

    all_selector = Optional(
        Literal("all")) + Optional(stream_modifier) + oneOf(
            "buffered unbuffered inputs outputs counters constants"
        ) + Optional(Literal("nodes"))
    all_selector.setParseAction(
        lambda s, l, t:
        [DataStreamSelector.FromString(u' '.join([str(x) for x in t]))])
    one_selector = Optional(Literal(
        "system")) + oneOf("buffered unbuffered input output counter constant"
                           ) + number + Optional(Literal("node"))
    one_selector.setParseAction(
        lambda s, l, t:
        [DataStreamSelector.FromString(u' '.join([str(x) for x in t]))])

    selector = one_selector | all_selector

    trigger_comp = oneOf('> < >= <= ==')
    stream_trigger = Group((Literal(u'count') | Literal(u'value')) +
                           Literal(u'(').suppress() - stream -
                           Literal(u')').suppress() - trigger_comp -
                           number).setResultsName('stream_trigger')
Example 13

def number_parse_action(instring, tokensStart, retTokens):
    parsed_values = retTokens[0]
    return types.Number(float(parsed_values))


number = Combine(Word(nums) + Optional(Literal('.') + Word(nums)))

number.addParseAction(number_parse_action)

##########
# String #
##########

single_quote_string = QuotedString('\'', escChar='\\')
double_quote_string = QuotedString('"', escChar='\\')


def string_parse_action(instring, tokensStart, retTokens):
    parsed_values = retTokens[0]
    return types.String(parsed_values)


string = single_quote_string | double_quote_string
string.addParseAction(string_parse_action)
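# e.g. (illustration): number.parseString('3.14') wraps 3.14 in types.Number,
# and string.parseString("'hi'") wraps 'hi' in types.String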

############
# Function #
############
Example 14
labeloffset = Combine(labelprefix + Word(':', alphanums + "_-+"))
labeloffset.setName('labeloffset')
# Bytes can be represented in binary, hex, char, or a number (0-255 or -128-127)
# and may include embedded arithmetic
#  OPCODE 0b00001100
#  OPCODE 0x0b
#  OPCODE 'a'
#  OPCODE 254-0x0a
#  OPCODE 'a'&0b00001111
binbyte = Combine(Literal('0b') + Char('01') * 8)
binbyte.setName('binbyte')
binbyte.setParseAction(lambda t: [int(t[0], 2)])
hexbyte = Combine(Literal('0x') + Char(srange("[0-9a-fA-F]")) * 2)
hexbyte.setName('hexbyte')
hexbyte.setParseAction(lambda t: [int(t[0], 16)])
chrbyte = QuotedString(quoteChar="'", unquoteResults=True)
chrbyte.setName('char')
chrbyte.setParseAction(lambda t: [ord(t[0])])
number = Word(nums + '-')
number.setName('number')
number.setParseAction(lambda t: [int(t[0])])
allbytes = binbyte | hexbyte | chrbyte | number
mathtoken = Combine(oneOf('+ - & |') + allbytes)
bytemathexpression = Combine(allbytes + OneOrMore(mathtoken))
bytemathexpression.setParseAction(lambda t: [eval(t[0])])
byte = bytemathexpression | allbytes
byte.setName('byte')
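# Quick illustration of the byte forms above:
#   0x0b -> [11], 0b00001111 -> [15], 'a' -> [97], and 'a'&0b00001111 -> [1]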
# Words can be represented in binary, hex, label, or number (0-65535 or -32768-32767)
#  OPCODE 0b0000111100001111
#  OPCODE 0x2911
#  OPCODE .label
Example 15
    Word,
    alphanums,
    alphas,
    delimitedList,
    pyparsing_common,
)

from .objects import GDObject

boolean = ((Keyword("true") | Keyword("false")
            ).setName("bool").setParseAction(lambda x: x[0].lower() == "true"))

null = Keyword("null").setParseAction(lambda _: [None])

primitive = (null
             | QuotedString('"', escChar="\\", multiline=True)
             | boolean
             | pyparsing_common.number)
value = Forward()

# Vector2( 1, 2 )
obj_type = (Word(alphas, alphanums).setResultsName("object_name") +
            Suppress("(") + delimitedList(value) +
            Suppress(")")).setParseAction(GDObject.from_parser)

# [ 1, 2 ] or [ 1, 2, ]
list_ = (Group(
    Suppress("[") + Optional(delimitedList(value)) + Optional(Suppress(",")) +
    Suppress("]")).setName("list").setParseAction(lambda p: p.asList()))
key_val = Group(QuotedString('"', escChar="\\") + Suppress(":") + value)
Example 16
def iter_trees(infile):
    import pyparsing
    pyparsing.ParserElement.enablePackrat()
    from pyparsing import (Word, Literal, QuotedString, CaselessKeyword,
                           CharsNotIn, OneOrMore, Group, Optional, Suppress,
                           Regex, Dict, ZeroOrMore, alphanums, nums)
    comment = Optional(Suppress("[&") + Regex(r'[^]]+') + Suppress("]"))
    name = Word(alphanums + "_.") | QuotedString("'")
    newick = Regex(r'[^;]+;')
    tree = (CaselessKeyword("tree").suppress() + Optional("*").suppress() +
            name.setResultsName("tree_name") +
            comment.setResultsName("tree_comment") + Suppress("=") +
            comment.setResultsName("root_comment") +
            newick.setResultsName("newick"))

    def not_begin(s):
        # print('not_begin', s)
        return s.strip().lower() != "begin trees;"

    def not_end(s):
        # print('not_end', s)
        return s.strip().lower() not in ("end;", "endblock;")

    def parse_ttable(f):
        ttable = {}
        # com = Suppress('[') + ZeroOrMore(CharsNotIn(']')) + Suppress(']')
        com = Suppress('[' + ZeroOrMore(CharsNotIn(']') + ']'))
        while True:
            s = next(f).strip()
            if not s:
                continue
            s = com.transformString(s).strip()
            if s.lower() == ";":
                break
            b = False
            if s[-1] in ",;":
                if s[-1] == ';':
                    b = True
                s = s[:-1]
            # print(s)
            k, v = s.split()
            ttable[k] = v
            if b:
                break
        return ttable

    # read lines between "begin trees;" and "end;"
    f = itertools.takewhile(not_end, itertools.dropwhile(not_begin, infile))
    s = next(f).strip().lower()
    if s != "begin trees;":
        print("Expecting 'begin trees;', got %s" % s, file=sys.stderr)
        return
    ttable = {}
    while True:
        try:
            s = next(f).strip()
        except StopIteration:
            break
        if not s:
            continue
        if s.lower() == "translate":
            ttable = parse_ttable(f)
            # print "ttable: %s" % len(ttable)
        elif s.split()[0].lower() == 'tree':
            match = tree.parseString(s)
            yield Newick(match, ttable)
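
# Hedged usage sketch for iter_trees() (assumes a NEXUS file containing a
# "begin trees;" ... "end;" block and the Newick wrapper used above):
#
#     with open('trees.nex') as infile:
#         for nwk in iter_trees(infile):
#             ...  # each item wraps one parsed 'tree' line plus the translate table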
Example 17
    infixNotation,
    opAssoc,
    dblQuotedString,
    delimitedList,
    Combine,
    Literal,
    QuotedString,
    ParserElement,
    pyparsing_common as ppc,
)

ParserElement.enablePackrat()

EQ, LPAR, RPAR, COLON, COMMA = map(Suppress, "=():,")
EXCL, DOLLAR = map(Literal, "!$")
sheetRef = Word(alphas, alphanums) | QuotedString("'", escQuote="''")
colRef = Optional(DOLLAR) + Word(alphas, max=2)
rowRef = Optional(DOLLAR) + Word(nums)
cellRef = Combine(
    Group(Optional(sheetRef + EXCL)("sheet") + colRef("col") + rowRef("row")))

cellRange = (Group(cellRef("start") + COLON + cellRef("end"))("range")
             | cellRef
             | Word(alphas, alphanums))
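# e.g. (illustration): cellRange.parseString('A1:B5')       -> [['A1', 'B5']]
#                      cellRange.parseString('Sheet1!$A$1') -> ['Sheet1!$A$1']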

expr = Forward()

COMPARISON_OP = oneOf("< = > >= <= != <>")
condExpr = expr + COMPARISON_OP + expr

ifFunc = (CaselessKeyword("if") - LPAR + Group(condExpr)("condition") + COMMA +
Example 18
class AssessmentParser13(object):
    """Grammar and parser for the assessment."""

    string = (
        QuotedString('\'', escChar='\\', multiline=True) ^
        QuotedString('"', escChar='\\', multiline=True))

    boolean = (
        Literal('true').setParseAction(make_bool(True)) ^
        Literal('false').setParseAction(make_bool(False)))

    float = Combine(
        Word(nums) + Optional(Literal('.') + Word(nums))
    ).setParseAction(make_float)

    integer = Word(nums).setParseAction(make_int)

    choice_decl = (
        string ^
        Combine(
            sep('correct(') + string + sep(')')
        ).setParseAction(lambda x: verify.Term(verify.CORRECT, x[0]))
    )

    regex = (
        Regex('/(.*)/i') ^
        Combine(
            sep('regex(') +
            QuotedString('"', escChar='\\') +
            sep(')')
        ).setParseAction(lambda x: verify.Term(verify.REGEX, x[0]))
    )

    question_decl = (
        sep('{') +
        Each(
            Optional(
                key('questionHTML') + sep(':') +
                string + Optional(sep(','))) +
            Optional(
                key('lesson') + sep(':') +
                string + Optional(sep(','))) +
            Optional(
                key('correctAnswerString') + sep(':') +
                string + Optional(sep(','))) +
            Optional(
                key('correctAnswerRegex') + sep(':') +
                regex + Optional(sep(','))) +
            Optional(
                key('correctAnswerNumeric') + sep(':') +
                float + Optional(sep(','))) +
            Optional(
                key('choiceScores') + sep(':') +
                sep('[') +
                Group(list_of(float)).setParseAction(make_list) +
                sep(']') +
                Optional(sep(','))) +
            Optional(
                key('weight') + sep(':') + integer + Optional(sep(','))) +
            Optional(
                key('multiLine') + sep(':') +
                boolean + Optional(sep(','))) +
            Optional(
                key('choices') + sep(':') +
                sep('[') +
                Group(list_of(choice_decl)).setParseAction(make_list) +
                sep(']') +
                Optional(sep(',')))
        ) +
        sep('}')).setParseAction(make_dict)

    assessment_grammar = (
        sep('assessment') +
        sep('=') +
        sep('{') +
        Each(
            Optional(
                key('assessmentName') + sep(':') +
                string + Optional(sep(','))) +
            Optional(
                key('preamble') + sep(':') +
                string + Optional(sep(','))) +
            Optional(
                key('checkAnswers') + sep(':') +
                boolean + Optional(sep(','))) +
            Optional(
                key('questionsList') + sep(':') +
                sep('[') +
                Group(list_of(question_decl)).setParseAction(make_list) +
                sep(']') +
                Optional(sep(',')))
        ) +
        sep('}') +
        Optional(sep(';'))).setParseAction(make_dict)

    @classmethod
    def parse_string(cls, content):
        return cls.assessment_grammar.parseString(content)

    @classmethod
    def parse_string_in_scope(cls, content, scope, root_name):
        """Parses assessment text following grammar."""
        if 'assessment' != root_name:
            raise Exception('Unsupported schema: %s', root_name)

        # we need to extract the results as a dictionary; so we remove the
        # outer array holding it
        ast = cls.parse_string(content).asList()
        if len(ast) == 1:
            ast = ast[0]

        return dict(
            scope.items() +
            {'__builtins__': {}}.items() +
            {root_name: ast}.items())
Example 19
def __make_parser():
    key = Word(alphas, alphanums + "_-")
    value = Word(alphanums + "-.,_=<>!@$%^&*[]{}:;|/'") | QuotedString('"')
    return Dict(
        ZeroOrMore(Group(key + Optional(Suppress("=") + value, default=True))))
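
# Hedged usage sketch for the helper above (bare keys default to True):
parser = __make_parser()
result = parser.parseString('mode=fast verbose retries=3')
print(result.asDict())  # {'mode': 'fast', 'verbose': True, 'retries': '3'}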
Example 20
class ActivityParser13(object):
    """Grammar and parser for the activity."""

    variable = Word(alphas)
    integer = Word(nums).setParseAction(make_int)
    string = (
        QuotedString('\'', escChar='\\', multiline=True) ^
        QuotedString('"', escChar='\\', multiline=True))
    boolean = (
        Literal('true').setParseAction(make_bool(True)) ^
        Literal('false').setParseAction(make_bool(False)))

    regex = (
        Regex('/(.*)/i') ^
        Combine(
            sep('regex(') +
            QuotedString('"', escChar='\\') +
            sep(')')
        ).setParseAction(lambda x: verify.Term(verify.REGEX, x[0]))
    )

    choice_decl = Group(
        sep('[') +
        string + sep(',') +
        boolean + sep(',') +
        string +
        sep(']')
    )

    choices_decl = Group(
        sep('[') +
        Optional(list_of(choice_decl)) +
        sep(']')
    ).setParseAction(make_list)

    multiple_choice_decl = (
        key('questionType') + sep(':') + key('multiple choice') +
        Optional(sep(','))
    )

    multiple_choice = (
        sep('{') +
        multiple_choice_decl +
        Each(
            Optional(
                key('questionHTML') + sep(':') +
                string + Optional(sep(','))) +
            Optional(
                key('choices') + sep(':') +
                choices_decl + Optional(sep(',')))
        ) +
        sep('}')
    ).setParseAction(make_dict)

    free_text_decl = (
        key('questionType') + sep(':') + key('freetext') +
        Optional(sep(','))
    )

    free_text = (
        sep('{') +
        free_text_decl +
        Each(
            Optional(
                key('questionHTML') + sep(':') +
                string + Optional(sep(','))) +
            Optional(
                key('correctAnswerRegex') + sep(':') +
                regex + Optional(sep(','))) +
            Optional(
                key('correctAnswerOutput') + sep(':') +
                string + Optional(sep(','))) +
            Optional(
                key('incorrectAnswerOutput') + sep(':') +
                string + Optional(sep(','))) +
            Optional(
                key('showAnswerPrompt') + sep(':') +
                string + Optional(sep(','))) +
            Optional(
                key('showAnswerOutput') + sep(':') +
                string + Optional(sep(','))) +
            Optional(
                key('outputHeight') + sep(':') +
                string + Optional(sep(',')))
        ) +
        sep('}')
    ).setParseAction(make_dict)

    question_list_decl = (
        sep('{') +
        Each(
            Optional(
                key('questionHTML') + sep(':') +
                string + Optional(sep(','))) +
            Optional(
                key('choices') + sep(':') +
                sep('[') +
                Group(list_of(string)).setParseAction(make_list) +
                sep(']') +
                Optional(sep(','))) +
            Optional(
                key('correctIndex') + sep(':') +
                (integer ^ (
                    sep('[') +
                    Group(list_of(integer)).setParseAction(make_list) +
                    sep(']'))) +
                Optional(sep(','))) +
            Optional(
                key('multiSelect') + sep(':') +
                boolean + Optional(sep(','))),
        ) +
        sep('}')).setParseAction(make_dict)

    questions_list_decl = Group(
        sep('[') +
        Optional(list_of(question_list_decl)) +
        sep(']')
    ).setParseAction(make_list)

    multiple_choice_group_decl = (
        key('questionType') + sep(':') + key('multiple choice group') +
        Optional(sep(','))
    )

    multiple_choice_group = (
        sep('{') +
        multiple_choice_group_decl +
        Each(
            Optional(
                key('questionGroupHTML') + sep(':') +
                string + Optional(sep(','))) +
            Optional(
                key('allCorrectMinCount') + sep(':') +
                integer + Optional(sep(','))) +
            Optional(
                key('allCorrectOutput') + sep(':') +
                string + Optional(sep(','))) +
            Optional(
                key('someIncorrectOutput') + sep(':') +
                string + Optional(sep(','))) +
            Optional(
                key('questionsList') + sep(':') +
                questions_list_decl + Optional(sep(',')))
        ) +
        sep('}')
    ).setParseAction(make_dict)

    activity_grammar = (
        sep('activity') +
        sep('=') +
        sep('[') +
        Optional(list_of(
            string ^ multiple_choice ^ free_text ^ multiple_choice_group)) +
        sep(']') +
        Optional(sep(';')))

    @classmethod
    def parse_string(cls, content):
        return cls.activity_grammar.parseString(content)

    @classmethod
    def parse_string_in_scope(cls, content, scope, root_name):
        """Parses activity text following grammar."""
        if 'activity' != root_name:
            raise Exception('Unsupported schema: %s', root_name)
        return dict(
            scope.items() +
            {'__builtins__': {}}.items() +
            {root_name: cls.parse_string(content).asList()}.items())
Example 21
COLON, LBRACK, RBRACK, LBRACE, RBRACE, TILDE, CARAT = map(Literal, ":[]{}~^")
LPAR, RPAR = map(Suppress, "()")
and_ = CaselessKeyword("AND")
or_ = CaselessKeyword("OR")
not_ = CaselessKeyword("NOT")
to_ = CaselessKeyword("TO")
keyword = and_ | or_ | not_

expression = Forward()

valid_word = Regex(r'([a-zA-Z0-9*_+.-]|\\[!(){}\[\]^"~*?\\:])+').setName(
    "word")
valid_word.setParseAction(lambda t: t[0].replace('\\\\', chr(127)).replace(
    '\\', '').replace(chr(127), '\\'))

string = QuotedString('"')

required_modifier = Literal("+")("required")
prohibit_modifier = Literal("-")("prohibit")
integer = Regex(r"\d+").setParseAction(lambda t: int(t[0]))
proximity_modifier = Group(TILDE + integer("proximity"))
number = Regex(r'\d+(\.\d+)?').setParseAction(lambda t: float(t[0]))
fuzzy_modifier = TILDE + Optional(number, default=0.5)("fuzzy")

term = Forward()
field_name = valid_word.copy().setName("fieldname")
incl_range_search = Group(LBRACK + term("lower") + to_ + term("upper") +
                          RBRACK)
excl_range_search = Group(LBRACE + term("lower") + to_ + term("upper") +
                          RBRACE)
range_search = incl_range_search("incl_range") | excl_range_search(
Example 22
    def parse(cls,
              content,
              basedir=None,
              resolve=True,
              unresolved_value=DEFAULT_SUBSTITUTION):
        """parse a HOCON content

        :param content: HOCON content to parse
        :type content: basestring
        :param resolve: if true, resolve substitutions
        :type resolve: boolean
        :param unresolved_value: value assigned to unresolved substitutions.
        If overridden with a default value, it will replace all unresolved values with that default value.
        If it is set to pyhocon.STR_SUBSTITUTION then it will replace the value by its substitution expression (e.g., ${x})
        :type unresolved_value: boolean
        :return: a ConfigTree or a list
        """

        unescape_pattern = re.compile(r'\\.')

        def replace_escape_sequence(match):
            value = match.group(0)
            return cls.REPLACEMENTS.get(value, value)

        def norm_string(value):
            return unescape_pattern.sub(replace_escape_sequence, value)

        def unescape_string(tokens):
            return ConfigUnquotedString(norm_string(tokens[0]))

        def parse_multi_string(tokens):
            # remove the first and last 3 "
            return tokens[0][3:-3]

        def convert_number(tokens):
            n = tokens[0]
            try:
                return int(n, 10)
            except ValueError:
                return float(n)

        def safe_convert_number(tokens):
            n = tokens[0]
            try:
                return int(n, 10)
            except ValueError:
                try:
                    return float(n)
                except ValueError:
                    return n

        def convert_period(tokens):

            period_value = int(tokens.value)
            period_identifier = tokens.unit

            period_unit = next((single_unit for single_unit, values in
                                cls.get_supported_period_type_map().items()
                                if period_identifier in values))

            return period(period_value, period_unit)

        # ${path} or ${?path} for optional substitution
        SUBSTITUTION_PATTERN = r"\$\{(?P<optional>\?)?(?P<variable>[^}]+)\}(?P<ws>[ \t]*)"

        def create_substitution(instring, loc, token):
            # remove the ${ and }
            match = re.match(SUBSTITUTION_PATTERN, token[0])
            variable = match.group('variable')
            ws = match.group('ws')
            optional = match.group('optional') == '?'
            substitution = ConfigSubstitution(variable, optional, ws, instring,
                                              loc)
            return substitution

        # quoted string, capturing any trailing whitespace
        STRING_PATTERN = '"(?P<value>(?:[^"\\\\]|\\\\.)*)"(?P<ws>[ \t]*)'

        def create_quoted_string(instring, loc, token):
            # unquote and unescape the matched string
            match = re.match(STRING_PATTERN, token[0])
            value = norm_string(match.group('value'))
            ws = match.group('ws')
            return ConfigQuotedString(value, ws, instring, loc)

        def include_config(instring, loc, token):
            url = None
            file = None
            required = False

            if token[0] == 'required':
                required = True
                final_tokens = token[1:]
            else:
                final_tokens = token

            if len(final_tokens) == 1:  # include "test"
                value = final_tokens[0].value if isinstance(
                    final_tokens[0], ConfigQuotedString) else final_tokens[0]
                if value.startswith("http://") or value.startswith(
                        "https://") or value.startswith("file://"):
                    url = value
                else:
                    file = value
            elif len(final_tokens) == 2:  # include url("test") or file("test")
                value = final_tokens[1].value if isinstance(
                    token[1], ConfigQuotedString) else final_tokens[1]
                if final_tokens[0] == 'url':
                    url = value
                else:
                    file = value

            if url is not None:
                logger.debug('Loading config from url %s', url)
                obj = ConfigFactory.parse_URL(url,
                                              resolve=False,
                                              required=required,
                                              unresolved_value=NO_SUBSTITUTION)
            elif file is not None:
                path = file if basedir is None else os.path.join(basedir, file)
                logger.debug('Loading config from file %s', path)
                obj = ConfigFactory.parse_file(
                    path,
                    resolve=False,
                    required=required,
                    unresolved_value=NO_SUBSTITUTION)
            else:
                raise ConfigException(
                    'No file or URL specified at: {loc}: {instring}',
                    loc=loc,
                    instring=instring)

            return ConfigInclude(obj if isinstance(obj, list) else obj.items())

        @contextlib.contextmanager
        def set_default_white_spaces():
            default = ParserElement.DEFAULT_WHITE_CHARS
            ParserElement.setDefaultWhitespaceChars(' \t')
            yield
            ParserElement.setDefaultWhitespaceChars(default)

        with set_default_white_spaces():
            assign_expr = Forward()
            true_expr = Keyword("true", caseless=True).setParseAction(
                replaceWith(True))
            false_expr = Keyword("false", caseless=True).setParseAction(
                replaceWith(False))
            null_expr = Keyword("null", caseless=True).setParseAction(
                replaceWith(NoneValue()))
            # key = QuotedString('"', escChar='\\', unquoteResults=False) | Word(alphanums + alphas8bit + '._- /')
            key = QuotedString('"', escChar='\\', unquoteResults=False) | \
                  Word("0123456789.").setParseAction(safe_convert_number) | Word(alphanums + alphas8bit + '._- /')

            eol = Word('\n\r').suppress()
            eol_comma = Word('\n\r,').suppress()
            comment = (Literal('#') | Literal('//')) - SkipTo(eol
                                                              | StringEnd())
            comment_eol = Suppress(Optional(eol_comma) + comment)
            comment_no_comma_eol = (comment | eol).suppress()
            number_expr = Regex(
                r'[+-]?(\d*\.\d+|\d+(\.\d+)?)([eE][+\-]?\d+)?(?=$|[ \t]*([\$\}\],#\n\r]|//))',
                re.DOTALL).setParseAction(convert_number)

            period_types = itertools.chain.from_iterable(
                cls.get_supported_period_type_map().values())
            period_expr = Regex(r'(?P<value>\d+)\s*(?P<unit>' +
                                '|'.join(period_types) +
                                ')$').setParseAction(convert_period)

            # multi line string using """
            # Using fix described in http://pyparsing.wikispaces.com/share/view/3778969
            multiline_string = Regex(
                '""".*?"*"""',
                re.DOTALL | re.UNICODE).setParseAction(parse_multi_string)
            # single-line quoted string
            quoted_string = Regex(
                r'"(?:[^"\\\n]|\\.)*"[ \t]*',
                re.UNICODE).setParseAction(create_quoted_string)
            # unquoted string that takes the rest of the line until an optional comment
            # we support .properties multiline support which is like this:
            # line1  \
            # line2 \
            # so a backslash precedes the \n
            unquoted_string = Regex(
                r'(?:[^^`+?!@*&"\[\{\s\]\}#,=\$\\]|\\.)+[ \t]*',
                re.UNICODE).setParseAction(unescape_string)
            substitution_expr = Regex(r'[ \t]*\$\{[^\}]+\}[ \t]*'
                                      ).setParseAction(create_substitution)
            string_expr = multiline_string | quoted_string | unquoted_string

            value_expr = period_expr | number_expr | true_expr | false_expr | null_expr | string_expr

            include_content = (quoted_string | (
                (Keyword('url') | Keyword('file')) - Literal('(').suppress() -
                quoted_string - Literal(')').suppress()))
            include_expr = (Keyword("include", caseless=True).suppress() +
                            (include_content |
                             (Keyword("required") - Literal('(').suppress() -
                              include_content - Literal(')').suppress()))
                            ).setParseAction(include_config)

            root_dict_expr = Forward()
            dict_expr = Forward()
            list_expr = Forward()
            multi_value_expr = ZeroOrMore(comment_eol | include_expr
                                          | substitution_expr | dict_expr
                                          | list_expr | value_expr
                                          | (Literal('\\') - eol).suppress())
            # for a dictionary : or = is optional
            # last zeroOrMore is because we can have t = {a:4} {b: 6} {c: 7} which is dictionary concatenation
            inside_dict_expr = ConfigTreeParser(
                ZeroOrMore(comment_eol | include_expr | assign_expr
                           | eol_comma))
            inside_root_dict_expr = ConfigTreeParser(
                ZeroOrMore(comment_eol | include_expr | assign_expr
                           | eol_comma),
                root=True)
            dict_expr << Suppress('{') - inside_dict_expr - Suppress('}')
            root_dict_expr << Suppress('{') - inside_root_dict_expr - Suppress(
                '}')
            list_entry = ConcatenatedValueParser(multi_value_expr)
            list_expr << Suppress('[') - ListParser(list_entry - ZeroOrMore(
                eol_comma - list_entry)) - Suppress(']')

            # special case when we have a value assignment where the string can potentially be the remainder of the line
            assign_expr << Group(key - ZeroOrMore(comment_no_comma_eol) - (
                dict_expr | (Literal('=') | Literal(':') | Literal('+=')) -
                ZeroOrMore(comment_no_comma_eol) -
                ConcatenatedValueParser(multi_value_expr)))

            # the file can be { ... } where {} can be omitted or []
            config_expr = ZeroOrMore(comment_eol | eol) + (
                list_expr | root_dict_expr
                | inside_root_dict_expr) + ZeroOrMore(comment_eol | eol_comma)
            config = config_expr.parseString(content, parseAll=True)[0]

            if resolve:
                allow_unresolved = resolve and unresolved_value is not DEFAULT_SUBSTITUTION and unresolved_value is not MANDATORY_SUBSTITUTION
                has_unresolved = cls.resolve_substitutions(
                    config, allow_unresolved)
                if has_unresolved and unresolved_value is MANDATORY_SUBSTITUTION:
                    raise ConfigSubstitutionException(
                        'resolve cannot be set to True and unresolved_value to MANDATORY_SUBSTITUTION'
                    )

            if unresolved_value is not NO_SUBSTITUTION and unresolved_value is not DEFAULT_SUBSTITUTION:
                cls.unresolve_substitutions_to_value(config, unresolved_value)
        return config
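The grammar above appears to be pyhocon's HOCON parser (ConfigFactory, ConfigTreeParser and NO_SUBSTITUTION are referenced). A minimal usage sketch, assuming pyhocon is installed, exercising the features called out in the comments (unquoted strings, substitutions, dictionary concatenation) through the public API:

from pyhocon import ConfigFactory

conf = ConfigFactory.parse_string("""
    databases {
        mysql {
            host = localhost
            port = 3306
        }
    }
    # two adjacent objects are merged (dictionary concatenation)
    t = {a: 4} {b: 6}
    # substitution, resolved at parse time
    mirror = ${databases.mysql.host}
""")

assert conf.get('databases.mysql.port') == 3306
assert conf.get('t.b') == 6
assert conf.get('mirror') == 'localhost'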
Esempio n. 23
0
def _create_field_parser():
    """
    Creates a parser using pyparsing that works with bibfield rule definitions

    BNF like grammar:

    rule ::= ([persistent_identifier] json_id ["[0]" | "[n]"] ["," aliases] ":" INDENT body UNDENT) | include | python_comment
    include ::= "include(" PATH ")"
    body ::=  [inherit_from] (creator | derived | calculated) [checker] [documentation] [producer]
    aliases ::= json_id ["[0]" | "[n]"] ["," aliases]

    creator ::= "creator:" INDENT creator_body+ UNDENT
    creator_body ::= [decorators] source_format "," source_tag "," python_allowed_expr
    source_format ::= MASTER_FORMATS
    source_tag ::= QUOTED_STRING

    derived ::= "derived:" INDENT derived_calculated_body UNDENT
    calculated ::= "calculated:" INDENT derived_calculated_body UNDENT
    derived_calculated_body ::= [decorators] "," python_allowed_exp

    decorators ::= (persistent_identifier | legacy | do_not_cache | parse_first | depends_on | only_if | only_if_master_value)*
    persistent_identifier ::= "@persistent_identifier(" level ")"
    legacy ::= "@legacy(" correspondences+ ")"
    correspondences ::= "(" source_tag [ "," tag_name ] "," json_id ")"
    parse_first ::= "@parse_first(" json_id+ ")"
    depends_on ::= "@depends_on(" json_id+ ")"
    only_if ::= "@only_if(" python_condition+ ")"
    only_if_master_value ::= "@only_if_master_value(" python_condition+  ")"

    inherit_from ::= "@inherit_from()"

    python_allowed_exp ::= ident | list_def | dict_def | list_access | dict_access | function_call

    checker ::= "checker:" INDENT checker_function+ UNDENT

    documentation ::= INDENT doc_string subfield* UNDENT
    doc_string ::= QUOTED_STRING
    subfield ::= "@subfield" json_id["."json_id*] ":" doc_string

    producer ::= "producer:" INDENT producer_body UNDENT
    producer_body ::= producer_code "," python_dictionary
    producer_code ::= ident
    """

    indent_stack = [1]

    def check_sub_indent(str, location, tokens):
        cur_col = col(location, str)
        if cur_col > indent_stack[-1]:
            indent_stack.append(cur_col)
        else:
            raise ParseException(str, location, "not a subentry")

    def check_unindent(str, location, tokens):
        if location >= len(str):
            return
        cur_col = col(location, str)
        if not (cur_col < indent_stack[-1] and cur_col <= indent_stack[-2]):
            raise ParseException(str, location, "not an unindent")

    def do_unindent():
        indent_stack.pop()

    INDENT = lineEnd.suppress() + empty + empty.copy().setParseAction(
        check_sub_indent)
    UNDENT = FollowedBy(empty).setParseAction(check_unindent)
    UNDENT.setParseAction(do_unindent)

    json_id = (Word(alphas + "_", alphanums + "_") + Optional(oneOf("[0] [n]")))\
              .setResultsName("json_id", listAllMatches=True)\
              .setParseAction(lambda tokens: "".join(tokens))
    aliases = delimitedList((Word(alphanums + "_") + Optional(oneOf("[0] [n]")))
                            .setParseAction(lambda tokens: "".join(tokens)))\
              .setResultsName("aliases")
    ident = Word(alphas + "_", alphanums + "_")
    dict_def = originalTextFor(nestedExpr('{', '}'))
    list_def = originalTextFor(nestedExpr('[', ']'))
    dict_access = list_access = originalTextFor(ident + nestedExpr('[', ']'))
    function_call = originalTextFor(
        ZeroOrMore(ident + ".") + ident + nestedExpr('(', ')'))

    python_allowed_expr = (dict_def ^ list_def ^ dict_access ^ \
            list_access ^ function_call ^ restOfLine)\
            .setResultsName("value", listAllMatches=True)

    persistent_identifier = (Suppress("@persistent_identifier") + \
            nestedExpr("(", ")"))\
            .setResultsName("persistent_identifier")
    legacy = (Suppress("@legacy") + originalTextFor(nestedExpr("(", ")")))\
            .setResultsName("legacy", listAllMatches=True)
    only_if = (Suppress("@only_if") + originalTextFor(nestedExpr("(", ")")))\
            .setResultsName("only_if")
    only_if_master_value = (Suppress("@only_if_value") + \
            originalTextFor(nestedExpr("(", ")")))\
            .setResultsName("only_if_master_value")
    depends_on = (Suppress("@depends_on") + \
            originalTextFor(nestedExpr("(", ")")))\
            .setResultsName("depends_on")
    parse_first = (Suppress("@parse_first") + \
            originalTextFor(nestedExpr("(", ")")))\
            .setResultsName("parse_first")
    memoize = (Suppress("@memoize") + nestedExpr("(", ")"))\
            .setResultsName("memoize")
    field_decorator = parse_first ^ depends_on ^ only_if ^ \
            only_if_master_value ^ memoize ^ legacy

    #Independent decorators
    inherit_from = (Suppress("@inherit_from") + \
            originalTextFor(nestedExpr("(", ")")))\
            .setResultsName("inherit_from")
    override = (Suppress("@") + "override")\
            .setResultsName("override")
    extend = (Suppress("@") + "extend")\
            .setResultsName("extend")
    master_format = (Suppress("@master_format") + \
            originalTextFor(nestedExpr("(", ")")))\
            .setResultsName("master_format") \
            .setParseAction(lambda toks: toks[0])

    derived_calculated_body = (ZeroOrMore(field_decorator) + python_allowed_expr)\
            .setResultsName('derived_calculated_def')

    derived = "derived" + Suppress(":") + \
            INDENT + derived_calculated_body + UNDENT
    calculated = "calculated" + Suppress(":") + \
            INDENT + derived_calculated_body + UNDENT

    source_tag = quotedString\
            .setParseAction(removeQuotes)\
            .setResultsName("source_tag", listAllMatches=True)
    source_format = Word(alphas, alphanums + "_")\
                    .setResultsName("source_format", listAllMatches=True)
    creator_body = (ZeroOrMore(field_decorator) + source_format + \
            Suppress(",") + source_tag + Suppress(",") + python_allowed_expr)\
            .setResultsName("creator_def", listAllMatches=True)
    creator = "creator" + Suppress(":") + \
            INDENT + OneOrMore(creator_body) + UNDENT
    field_def = (creator | derived | calculated)\
                .setResultsName("type_field", listAllMatches=True)

    #JsonExtra
    json_dumps = (Suppress('dumps') + Suppress(',') + python_allowed_expr)\
        .setResultsName("dumps")\
        .setParseAction(lambda toks: toks.value[0])
    json_loads = (Suppress("loads") + Suppress(",") + python_allowed_expr)\
        .setResultsName("loads")\
        .setParseAction(lambda toks: toks.value[0])

    json_extra = (Suppress('json:') + \
            INDENT + Each((json_dumps, json_loads)) + UNDENT)\
            .setResultsName('json_ext')

    #Checker
    checker_function = (Optional(master_format) + ZeroOrMore(ident + ".") + ident + originalTextFor(nestedExpr('(', ')')))\
                       .setResultsName("checker", listAllMatches=True)
    checker = ("checker" + Suppress(":") + INDENT +
               OneOrMore(checker_function) + UNDENT)

    #Description/Documentation
    doc_double = QuotedString(quoteChar='"""', multiline=True)
    doc_single = QuotedString(quoteChar="'''", multiline=True)
    doc_string = INDENT + (doc_double | doc_single) + UNDENT
    description_body = (Suppress('description:') + doc_string).\
                setParseAction(lambda toks: toks[0][0])
    description = (description_body | doc_double | doc_single)\
            .setResultsName('description')

    #Producer
    producer_code = (Word(alphas, alphanums + "_")\
           + originalTextFor(nestedExpr("(", ")")))\
           .setResultsName('producer_code', listAllMatches=True)
    producer_body = (producer_code + Suppress(",") + python_allowed_expr)\
                    .setResultsName("producer_rule", listAllMatches=True)
    producer = Suppress("producer:") + INDENT + OneOrMore(
        producer_body) + UNDENT

    schema = (Suppress('schema:') + INDENT + dict_def + UNDENT)\
            .setParseAction(lambda toks: toks[0])\
            .setResultsName('schema')

    body = Optional(field_def) & Optional(checker) & Optional(json_extra) \
            & Optional(description) & Optional(producer) & Optional(schema)
    comment = Literal("#") + restOfLine + LineEnd()
    include = (Suppress("include") + quotedString)\
              .setResultsName("includes", listAllMatches=True)
    rule = (Optional(persistent_identifier) + Optional(inherit_from) + \
            Optional(override) + Optional(extend) +json_id + \
            Optional(Suppress(",") + aliases) + Suppress(":") + \
            INDENT + body + UNDENT)\
           .setResultsName("rules", listAllMatches=True)

    return OneOrMore(rule | include | comment.suppress())
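The INDENT/UNDENT pair above is the classic pyparsing indentation-tracking recipe: a stack of column numbers plus parse actions on empty markers. A minimal, self-contained sketch of the same idiom (the "section" mini-grammar below is hypothetical, not part of the bibfield parser):

from pyparsing import (FollowedBy, OneOrMore, ParseException, Suppress, Word,
                       alphas, col, empty, lineEnd)

indent_stack = [1]

def check_sub_indent(s, l, t):
    # a nested block must start in a column to the right of its parent
    if col(l, s) > indent_stack[-1]:
        indent_stack.append(col(l, s))
    else:
        raise ParseException(s, l, "not a subentry")

def do_unindent():
    indent_stack.pop()

INDENT = lineEnd.suppress() + empty + empty.copy().setParseAction(check_sub_indent)
UNDENT = FollowedBy(empty).setParseAction(do_unindent)

# hypothetical grammar: a "name:" header followed by an indented list of words
section = Word(alphas) + Suppress(':') + INDENT + OneOrMore(Word(alphas)) + UNDENT

print(section.parseString('colors:\n    red\n    green\n'))
# -> ['colors', 'red', 'green']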
Esempio n. 24
0
    class Grammar:
        """ Lexical grammar of SCS definition (.sii) file """
        class Parse:
            """ Helper class holding static methods that prepend type information """
            @staticmethod
            def int(toks):
                """ Parse an ordinary int value """
                toks[0] = int(toks[0])
                return toks

            @staticmethod
            def float(toks):
                """ Parse an ordinary float or little endian hex string as a 4-byte float """
                if toks[0].startswith('&'):
                    binary = bytes.fromhex(toks[0][1:])
                    toks[0] = struct.unpack('>f', binary)[0]
                else:
                    toks[0] = float(toks[0])
                return toks

            @staticmethod
            def bool(toks):
                """ Parse bool True or False value """
                toks[0] = (toks[0] == 'true')
                return toks

            @staticmethod
            def reference(toks):
                """ Parse delayed cross reference to an entry """
                toks[0] = DefinitionFile.Reference(toks[0])
                return toks

            @staticmethod
            def tuple(toks):
                """ Parse a tuple"""
                toks[0] = tuple(toks[0])
                return toks

            @staticmethod
            def include(toks):
                """ Include content of another definition file """
                pass

        identifier = Word(alphanums + '_')
        name = Optional(Suppress('"')) + Word(alphanums + '.' +
                                              '_') + Optional(Suppress('"'))

        intValue = Word(nums + '-', nums).setParseAction(Parse.int)
        int = identifier + Suppress(':') + intValue
        int.setParseAction(lambda toks: toks.insert(0, 'int'))

        binaryFloat = Word('&', hexnums)
        regularFloat = Word(nums + '-', nums + '.' + 'eE' + '-')
        floatValue = (regularFloat ^ binaryFloat).setParseAction(Parse.float)
        float = identifier + Suppress(':') + floatValue
        float.setParseAction(lambda toks: toks.insert(0, 'float'))

        boolValue = (Keyword('true') ^ Keyword('false')).setParseAction(
            Parse.bool)
        bool = identifier + Suppress(':') + boolValue
        bool.setParseAction(lambda toks: toks.insert(0, 'bool'))

        textValue = QuotedString('"', multiline=True) ^ identifier
        text = identifier + Suppress(':') + textValue
        text.setParseAction(lambda toks: toks.insert(0, 'text'))

        tupleValue = Group(
            Suppress('(') + delimitedList(intValue ^ floatValue, delim=',') +
            Suppress(')'))
        tupleValue.setParseAction(Parse.tuple)
        tuple = identifier + Suppress(':') + tupleValue
        tuple.setParseAction(lambda toks: toks.insert(0, 'tuple'))

        referenceValue = Word(alphanums + '.' + '_').setParseAction(
            Parse.reference)
        reference = identifier + Suppress(':') + referenceValue
        reference.setParseAction(lambda toks: toks.insert(0, 'reference'))

        arrayValue = (intValue ^ floatValue ^ boolValue ^ textValue
                      ^ tupleValue ^ referenceValue)
        array = Combine(identifier + Suppress('[' + Optional(intValue) + ']')
                        ) + Suppress(':') + arrayValue
        array.setParseAction(lambda toks: toks.insert(0, 'array'))

        label = Group(identifier + Suppress(':') + name)
        property = Group(int ^ float ^ bool ^ text ^ tuple ^ reference ^ array)
        include = Suppress(
            Keyword('@include')) + QuotedString('"').setParseAction(
                Parse.include)
        entry = label + Suppress('{') + ZeroOrMore(property
                                                   ^ include) + Suppress('}')

        junk = ZeroOrMore(CharsNotIn(alphanums))
        header = Suppress(junk + Optional(Keyword('SiiNunit') + '{'))
        footer = Suppress(Optional('}'))

        file = header + ZeroOrMore(Group(entry ^ include)) + footer
        file.ignore(cStyleComment)
        file.ignore(dblSlashComment)
        file.ignore(pythonStyleComment)

        @classmethod
        def tokenize(cls, string: str) -> list:
            """ Perform lexical analysis and return the list of discovered tokens """
            return cls.file.parseString(string, parseAll=True).asList()
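A small self-contained check (not from the original file) of the '&'-prefixed hex float convention that Parse.float handles above: the hex digits are the big-endian bytes of an IEEE-754 single, so &3f800000 decodes to 1.0.

import struct

assert struct.unpack('>f', bytes.fromhex('3f800000'))[0] == 1.0
# Within the full module the same conversion is reachable through the grammar,
# e.g. DefinitionFile.Grammar.float.parseString('mass: &3f800000')
# (DefinitionFile is assumed to be the enclosing class, as Parse.reference suggests).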
Esempio n. 25
0
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from pyparsing import (CharsNotIn, Group, Forward, Literal, Suppress, Word,
                       QuotedString, ZeroOrMore, alphas, alphanums)
from string import Template
import re

# Grammar for CMake
comment = Literal('#') + ZeroOrMore(CharsNotIn('\n'))
quoted_argument = QuotedString('\"', '\\', multiline=True)
unquoted_argument = CharsNotIn('\n ()#\"\\')
argument = quoted_argument | unquoted_argument | Suppress(comment)
arguments = Forward()
arguments << (argument | (Literal('(') + ZeroOrMore(arguments) + Literal(')')))
identifier = Word(alphas, alphanums+'_')
command = Group(identifier + Literal('(') + ZeroOrMore(arguments) + Literal(')'))
file_elements = command | Suppress(comment)
cmake = ZeroOrMore(file_elements)


def extract_arguments(parsed):
    """Extract the command arguments skipping the parentheses"""
    return parsed[2:len(parsed) - 1]


def match_block(command, parsed, start):
    """Find the end of block starting with the command"""
    depth = 0
    end = start + 1
    endcommand = 'end' + command
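The match_block helper above is cut off in this excerpt, but the grammar itself is complete and can be exercised directly; a small sketch (the sample CMake content is made up):

parsed = cmake.parseString('# build the tool\nadd_executable(foo main.cpp)\n',
                           parseAll=True)
print(extract_arguments(parsed[0]))  # -> ['foo', 'main.cpp']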
Esempio n. 26
0
try:
    from evdev.ecodes import ecodes
except ImportError:
    ecodes = None
    print('WARNING: evdev is not available')

try:
    from functools import lru_cache
except ImportError:
    # don't do caching on old python
    lru_cache = lambda: (lambda f: f)

EOL = LineEnd().suppress()
EMPTYLINE = LineEnd()
COMMENTLINE = pythonStyleComment + EOL
INTEGER = Word(nums)
STRING = QuotedString('"')
REAL = Combine((INTEGER + Optional('.' + Optional(INTEGER))) ^ ('.' + INTEGER))
SIGNED_REAL = Combine(Optional(Word('-+')) + REAL)
UDEV_TAG = Word(string.ascii_uppercase, alphanums + '_')

TYPES = {
    'mouse': ('usb', 'bluetooth', 'ps2', '*'),
    'evdev': ('name', 'atkbd', 'input'),
    'id-input': ('modalias', ),
    'touchpad': ('i8042', 'rmi', 'bluetooth', 'usb'),
    'joystick': ('i8042', 'rmi', 'bluetooth', 'usb'),
    'keyboard': ('name', ),
    'sensor': ('modalias', ),
}
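A quick sanity check (not part of the original script) of the shared value tokens defined above; it assumes the pyparsing imports from the top of the file, which are cut off in this excerpt:

print(SIGNED_REAL.parseString('-0.5')[0])            # -> '-0.5' (Combine keeps it a string)
print(UDEV_TAG.parseString('ID_INPUT_TOUCHPAD')[0])  # -> 'ID_INPUT_TOUCHPAD'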

Esempio n. 27
0
    def __init__(self, funcname, selected_fields=False, userargs=None,
                 default_fields=None, parent=None, example=None, text=None):
        """funcname is name the function you want to use(can be either string, or functions.py function).
        if combotags is true then a combobox with tags that the user can choose from are shown.
        userargs is the default values you want to fill the controls in the dialog with
        [make sure they don't exceed the number of arguments of funcname]."""
        QWidget.__init__(self, parent)
        identifier = QuotedString('"') | Combine(Word
                                                 (alphanums + ' !"#$%&\'()*+-./:;<=>?@[\\]^_`{|}~'))
        tags = delimitedList(identifier)
        self.func = Function(funcname)
        docstr = self.func.doc[1:]
        self.vbox = QVBoxLayout()
        self.retval = []
        self._selectedFields = selected_fields

        if selected_fields:
            fields = ['__all'] + sorted(INFOTAGS) + \
                     selected_fields + gettaglist()
        else:
            fields = ['__selected', '__all'] + sorted(INFOTAGS) + \
                     gettaglist()

        self.tagcombo = QComboBox(self)
        self.tagcombo.setToolTip(FIELDS_TOOLTIP)
        self.tagcombo.setEditable(True)
        self.tagcombo.setCompleter(QCompleter(self.tagcombo))
        self.tagcombo.addItems(fields)

        self.tagcombo.editTextChanged.connect(self.showexample)

        if self.func.function not in functions.no_fields:
            label = QLabel(translate('Defaults', "&Fields"))
            self.vbox.addWidget(label)
            self.vbox.addWidget(self.tagcombo)
            label.setBuddy(self.tagcombo)
        else:
            self.tagcombo.setVisible(False)
        self.example = example
        self._text = text

        if self.func.function in functions_dialogs.dialogs:
            vbox = QVBoxLayout()
            vbox.addWidget(self.tagcombo)
            self.widget = functions_dialogs.dialogs[self.func.function](self)
            vbox.addWidget(self.widget)
            vbox.addStretch()
            self.setLayout(vbox)
            self.setMinimumSize(self.sizeHint())

            self.setArguments(default_fields, userargs)
            return
        else:
            self.widget = None

        self.textcombos = []
        # Loop that creates all the controls
        self.controls = []
        for argno, line in enumerate(docstr):
            args = tags.parseString(line)
            label = args[0]
            ctype = args[1]
            default = args[2:]

            control, func, label = self._createControl(label, ctype, default)

            self.retval.append(func)
            self.controls.append(control)
            getattr(control, self.signals[ctype]).connect(self.showexample)

            if label:
                self.vbox.addWidget(label)
            self.vbox.addWidget(control)

        self.setArguments(default_fields, userargs)

        self.vbox.addStretch()
        self.setLayout(self.vbox)
        self.setMinimumSize(self.sizeHint())
Esempio n. 28
0

def convertTuple(t):
    return tuple(t[0])


omcRecord = Forward()
omcValue = Forward()

TRUE = Keyword("true").setParseAction(replaceWith(True))
FALSE = Keyword("false").setParseAction(replaceWith(False))
NONE = (Keyword("NONE") + Suppress("(") + Suppress(")")).setParseAction(
    replaceWith(None))
SOME = (Suppress(Keyword("SOME")) + Suppress("(") + omcValue + Suppress(")"))

omcString = QuotedString(quoteChar='"', escChar='\\',
                         multiline=True).setParseAction(convertString)
omcNumber = Combine(
    Optional('-') + ('0' | Word('123456789', nums)) +
    Optional('.' + Word(nums)) +
    Optional(Word('eE', exact=1) + Word(nums + '+-', nums)))

ident = Word(alphas + "_", alphanums + "_") | Combine(
    "'" + Word(alphanums + "!#$%&()*+,-./:;<>=?@[]^{}|~ ") + "'")
fqident = Forward()
fqident << ((ident + "." + fqident) | ident)
omcValues = delimitedList(omcValue)
omcTuple = Group(Suppress('(') + Optional(omcValues) +
                 Suppress(')')).setParseAction(convertTuple)
omcArray = Group(Suppress('{') + Optional(omcValues) +
                 Suppress('}')).setParseAction(convertTuple)
omcValue << (omcString | omcNumber | omcRecord | omcArray | omcTuple | SOME
Esempio n. 29
0
def graph_definition():

    global graphparser
    
    if not graphparser:
    
        # punctuation
        colon  = Literal(":")
        lbrace = Literal("{")
        rbrace = Literal("}")
        lbrack = Literal("[")
        rbrack = Literal("]")
        lparen = Literal("(")
        rparen = Literal(")")
        equals = Literal("=")
        comma  = Literal(",")
        dot    = Literal(".")
        slash  = Literal("/")
        bslash = Literal("\\")
        star   = Literal("*")
        semi   = Literal(";")
        at     = Literal("@")
        minus  = Literal("-")
        
        # keywords
        strict_    = CaselessLiteral("strict")
        graph_     = CaselessLiteral("graph")
        digraph_   = CaselessLiteral("digraph")
        subgraph_  = CaselessLiteral("subgraph")
        node_      = CaselessLiteral("node")
        edge_      = CaselessLiteral("edge")
        
        
        # token definitions
        
        identifier = Word(alphanums + "_." ).setName("identifier")
        
        double_quoted_string = QuotedString('"', multiline=True, unquoteResults=False) # dblQuotedString

        alphastring_ = OneOrMore(CharsNotIn(_noncomma + ' '))

        def parse_html(s, loc, toks):
            return '<%s>' % ''.join(toks[0])
            
        
        opener = '<'
        closer = '>'
        html_text = nestedExpr( opener, closer, 
            ( CharsNotIn( opener + closer )  ) 
                ).setParseAction(parse_html).leaveWhitespace()

        ID = ( identifier | html_text | 
            double_quoted_string | #.setParseAction(strip_quotes) |
            alphastring_ ).setName("ID")
            
        
        float_number = Combine(Optional(minus) +	
            OneOrMore(Word(nums + "."))).setName("float_number")
            
        righthand_id =  (float_number | ID ).setName("righthand_id")

        port_angle = (at + ID).setName("port_angle")
        
        port_location = (OneOrMore(Group(colon + ID)) |	
            Group(colon + lparen + ID + comma + ID + rparen)).setName("port_location")
            
        port = (Group(port_location + Optional(port_angle)) |	
            Group(port_angle + Optional(port_location))).setName("port")
            
        node_id = (ID + Optional(port))
        a_list = OneOrMore(ID + Optional(equals + righthand_id) +
            Optional(comma.suppress())).setName("a_list")
        
        attr_list = OneOrMore(lbrack.suppress() + Optional(a_list) +	
            rbrack.suppress()).setName("attr_list")
        
        attr_stmt = (Group(graph_ | node_ | edge_) + attr_list).setName("attr_stmt")
        
        edgeop = (Literal("--") | Literal("->")).setName("edgeop")
        
        stmt_list = Forward()
        graph_stmt = Group(lbrace.suppress() + Optional(stmt_list) +	
            rbrace.suppress() + Optional(semi.suppress()) ).setName("graph_stmt")
            
            
        edge_point = Forward()
        
        edgeRHS = OneOrMore(edgeop + edge_point)
        edge_stmt = edge_point + edgeRHS + Optional(attr_list)
        
        subgraph = Group(subgraph_ + Optional(ID) + graph_stmt).setName("subgraph")
        
        edge_point << Group( subgraph | graph_stmt | node_id ).setName('edge_point')
        
        node_stmt = (node_id + Optional(attr_list) + Optional(semi.suppress())).setName("node_stmt")
        
        assignment = (ID + equals + righthand_id).setName("assignment")
        stmt =  (assignment | edge_stmt | attr_stmt | subgraph | graph_stmt | node_stmt).setName("stmt")
        stmt_list << OneOrMore(stmt + Optional(semi.suppress()))
        
        graphparser = OneOrMore( (Optional(strict_) + Group((graph_ | digraph_)) +
            Optional(ID) + graph_stmt).setResultsName("graph") )
        
        singleLineComment = Group("//" + restOfLine) | Group("#" + restOfLine)
        
        
        # actions
        
        graphparser.ignore(singleLineComment)
        graphparser.ignore(cStyleComment)
        
        assignment.setParseAction(push_attr_list)
        a_list.setParseAction(push_attr_list)
        edge_stmt.setParseAction(push_edge_stmt)
        node_stmt.setParseAction(push_node_stmt)
        attr_stmt.setParseAction(push_default_stmt)
        
        subgraph.setParseAction(push_subgraph_stmt)
        graph_stmt.setParseAction(push_graph_stmt)
        graphparser.setParseAction(push_top_graph_stmt)
        
    
    return graphparser
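This looks like the dot_parser module used by pydot / pydot_ng, where the push_* parse actions (not shown in this excerpt) build the graph objects. Assuming pydot is installed, the same grammar is driven through the public helper; a minimal sketch:

import pydot

# recent pydot versions return a list of pydot.Dot objects
graphs = pydot.graph_from_dot_data('digraph G { a -> b [label="edge"]; }')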
Esempio n. 30
0
# PARSER
def ddlWord(string):
    return WordStart(alphanums +
                     "_") + CaselessLiteral(string) + WordEnd(alphanums + "_")


# This function should be refactored if we find some database function which needs parameters
# Right now it works only for something like NOW() in MySQL default field value
def ddlFunctionWord(string):
    return CaselessLiteral(string) + OneOrMore("(") + ZeroOrMore(
        " ") + OneOrMore(")")


ddlString = Or(
    [QuotedString("'"),
     QuotedString("\"", escQuote='""'),
     QuotedString("`")])
negativeSign = Literal('-')
ddlNum = Combine(Optional(negativeSign) + Word(nums + "."))
ddlTerm = Word(alphanums + "_$")
ddlName = Or([ddlTerm, ddlString])
ddlMathOp = Word("+><=-")
ddlBoolean = Or([ddlWord("AND"), ddlWord("OR"), ddlWord("NOT")])
ddlArguments = "(" + delimitedList(Or([ddlString, ddlTerm, ddlNum])) + ")"
ddlMathCond = Optional("(") + delimitedList(Or([
    Group(ddlName + ddlMathOp + ddlName),
    Group(ddlName + Optional(ddlWord("IS")) + ddlWord("NOT") +
          ddlWord("NULL")),
]),
                                            delim=ddlBoolean) + Optional(")")