Example #1
def compile():
    LBRACE, RBRACE, LBRACK, RBRACK, COLON = map(pp.Suppress, '{}[]:')

    value = pp.Forward()

    true = pp.Keyword('true').setParseAction(pp.replaceWith(True))
    false = pp.Keyword('false').setParseAction(pp.replaceWith(False))
    null = pp.Keyword('null').setParseAction(pp.replaceWith(None))
    number = (pp.Regex(
        r'-?(0|[1-9][0-9]*)(\.[0-9]+)?([eE][-+]?[0-9]+)?').setParseAction(
            pp.tokenMap(float)))
    string = (pp.Regex(
        r'"([ !#-\[\]-\U0010ffff]+'
        r'|\\(?:["\\/bfnrt]|u[0-9A-Fa-f]{4}))*"').setParseAction(
            pp.tokenMap(json_unescape)))

    items = pp.delimitedList(value)
    array = (pp.Group(LBRACK - pp.Optional(items) +
                      RBRACK).setParseAction(lambda t: t.asList()))

    member = pp.Group(string + COLON + value)
    members = pp.delimitedList(member)
    object = (pp.Dict(LBRACE - pp.Optional(members) +
                      RBRACE).setParseAction(lambda t: t.asDict()))

    value << (object | array | string | number | true | false | null)

    json = value('top') + pp.StringEnd()
    json.setDefaultWhitespaceChars(' \t\n\r')
    json.parseWithTabs()

    return lambda s: json.parseString(s)['top']
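
A minimal usage sketch for the factory above, assuming the surrounding module does "import pyparsing as pp" and defines a json_unescape helper that strips the quotes and decodes backslash escapes:

parse = compile()
doc = parse('{"name": "widget", "sizes": [1, 2.5, 3], "active": true}')
# With those assumptions, numbers come back as floats (tokenMap(float)),
# objects as dicts (asDict) and arrays as lists (asList), roughly:
# {'name': 'widget', 'sizes': [1.0, 2.5, 3.0], 'active': True}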
Example #2
class TestParseAction(PyparsingExpressionTestCase):
    tests = [
        PpTestSpec(
            desc = "Match with numeric string converted to int",
            expr = pp.Word("0123456789").addParseAction(lambda t: int(t[0])),
            text = "12345",
            expected_list = [12345],  # note - result is type int, not str 
        ),
        PpTestSpec(
            desc = "Use two parse actions to convert numeric string, then convert to datetime",
            expr = pp.Word(pp.nums).addParseAction(lambda t: int(t[0]), 
                                                   lambda t: datetime.utcfromtimestamp(t[0])),
            text = "1537415628",
            expected_list = [datetime(2018, 9, 20, 3, 53, 48)],
        ),
        PpTestSpec(
            desc = "Use tokenMap for parse actions that operate on a single-length token",
            expr = pp.Word(pp.nums).addParseAction(pp.tokenMap(int), 
                                                   pp.tokenMap(datetime.utcfromtimestamp)),
            text = "1537415628",
            expected_list = [datetime(2018, 9, 20, 3, 53, 48)],
        ),
        PpTestSpec(
            desc = "Using a built-in function that takes a sequence of strs as a parse action",
            expr = pp.OneOrMore(pp.Word(pp.hexnums, exact=2)).addParseAction(':'.join),
            text = "0A4B7321FE76",
            expected_list = ['0A:4B:73:21:FE:76'],
        ),
        PpTestSpec(
            desc = "Using a built-in function that takes a sequence of strs as a parse action",
            expr = pp.OneOrMore(pp.Word(pp.hexnums, exact=2)).addParseAction(sorted),
            text = "0A4B7321FE76",
            expected_list = ['0A', '21', '4B', '73', '76', 'FE'],
        ),
    ]
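
The same tokenMap chaining can be exercised outside the test harness; a minimal standalone sketch, reusing the timestamp from the test case above:

import pyparsing as pp
from datetime import datetime

epoch = pp.Word(pp.nums).addParseAction(pp.tokenMap(int),
                                        pp.tokenMap(datetime.utcfromtimestamp))
print(epoch.parseString("1537415628")[0])  # 2018-09-20 03:53:48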
Example #3
def table_row(start_tag, end_tag):
    body = start_tag.tag_body
    body.addParseAction(pp.tokenMap(str.strip), pp.tokenMap(strip_html))
    row = pp.Group(tr.suppress() + pp.ZeroOrMore(start_tag.suppress() + body +
                                                 end_tag.suppress()) +
                   tr_end.suppress())
    return row
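
A hypothetical wiring for table_row, assuming pyparsing 3.x (where expressions returned by makeHTMLTags expose a tag_body attribute) and that the surrounding module defines the tr/tr_end tags plus the strip_html helper used in the parse actions:

td, td_end = pp.makeHTMLTags("td")
tr, tr_end = pp.makeHTMLTags("tr")
cells = table_row(td, td_end)
print(cells.searchString("<tr><td> 1 </td><td> two </td></tr>"))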
Example #4
class TestParseAction(PyparsingExpressionTestCase):
    tests = [
        PpTestSpec(
            desc=
            "Parsing real numbers - use parse action to convert to float at parse time",
            expr=pp.Combine(pp.Word(pp.nums) + "." +
                            pp.Word(pp.nums)).addParseAction(
                                lambda t: float(t[0]))[...],
            text="1.2 2.3 3.1416 98.6",
            expected_list=[
                1.2,
                2.3,
                3.1416,
                98.6,
            ],  # note, these are now floats, not strs
        ),
        PpTestSpec(
            desc="Match with numeric string converted to int",
            expr=pp.Word("0123456789").addParseAction(lambda t: int(t[0])),
            text="12345",
            expected_list=[12345],  # note - result is type int, not str
        ),
        PpTestSpec(
            desc=
            "Use two parse actions to convert numeric string, then convert to datetime",
            expr=pp.Word(pp.nums).addParseAction(
                lambda t: int(t[0]),
                lambda t: datetime.utcfromtimestamp(t[0])),
            text="1537415628",
            expected_list=[datetime(2018, 9, 20, 3, 53, 48)],
        ),
        PpTestSpec(
            desc=
            "Use tokenMap for parse actions that operate on a single-length token",
            expr=pp.Word(pp.nums).addParseAction(
                pp.tokenMap(int), pp.tokenMap(datetime.utcfromtimestamp)),
            text="1537415628",
            expected_list=[datetime(2018, 9, 20, 3, 53, 48)],
        ),
        PpTestSpec(
            desc=
            "Using a built-in function that takes a sequence of strs as a parse action",
            expr=pp.Word(pp.hexnums, exact=2)[...].addParseAction(":".join),
            text="0A4B7321FE76",
            expected_list=["0A:4B:73:21:FE:76"],
        ),
        PpTestSpec(
            desc=
            "Using a built-in function that takes a sequence of strs as a parse action",
            expr=pp.Word(pp.hexnums, exact=2)[...].addParseAction(sorted),
            text="0A4B7321FE76",
            expected_list=["0A", "21", "4B", "73", "76", "FE"],
        ),
    ]
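
The last two cases can be run standalone as well; a short sketch assuming "import pyparsing as pp" and a pyparsing version that supports the expr[...] repetition syntax already used above:

mac = pp.Word(pp.hexnums, exact=2)[...].addParseAction(":".join)
print(mac.parseString("0A4B7321FE76"))  # ['0A:4B:73:21:FE:76']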
Example #5
    def __init__(self):
        """Constructor: build the LAD parser."""
        self.NwNumber = pp.Word(pp.nums, max=1).setParseAction(pp.tokenMap(int)).setBreak(False)
        self.Nw = pp.CaselessLiteral('NW:') + self.NwNumber + pp.Suppress(pp.lineEnd())
        self.Ope_I = pp.Combine(pp.CaselessLiteral('I') + pp.Word(pp.nums, max=2))
        self.Ope_O = pp.Combine(pp.CaselessLiteral('O') + pp.Word(pp.nums, max=2))
        self.Ope_M = pp.Combine(pp.CaselessLiteral('M') + pp.Word(pp.nums, max=2))
        self.Ope = self.Ope_I | self.Ope_O | self.Ope_M

        self.Command_LD = (pp.CaselessKeyword('LDN') | pp.CaselessKeyword('LD')) + self.Ope + pp.Suppress(pp.lineEnd())
        self.Command_AND = (pp.CaselessKeyword('ANDN') | pp.CaselessKeyword('AND')) + self.Ope + pp.Suppress(pp.lineEnd())
        self.Command_OR = (pp.CaselessKeyword('ORN') | pp.CaselessKeyword('OR')) + self.Ope + pp.Suppress(pp.lineEnd())
        self.Command_OUT = pp.CaselessKeyword('OUT') + self.Ope + pp.Suppress(pp.lineEnd())

        self.Command_BSAND = pp.CaselessKeyword('BSAND') + pp.Suppress(pp.lineEnd())
        self.Command_BFAND = pp.CaselessKeyword('BFAND') + pp.Suppress(pp.lineEnd())
        self.Command_BSOR = pp.CaselessKeyword('BSOR') + pp.Suppress(pp.lineEnd())
        self.Command_BFOR = pp.CaselessKeyword('BFOR') + pp.Suppress(pp.lineEnd())

        self.Command_LDOR = self.Command_LD + self.Command_OR * (0, 7)
        self.Command_ANDOR = self.Command_AND + self.Command_OR * (0, 7)
        self.Command_LDAND  = self.Command_LDOR + self.Command_ANDOR * (0, 7)

        self.Complex = pp.Forward()
        self.Block = pp.Group((self.Complex | self.Command_LDAND) + pp.Optional(self.Command_ANDOR * (0, 7)))
        self.ComplexOR = self.Command_BSOR + self.Block + self.Block + self.Command_BFOR
        self.ComplexAND = self.Command_BSAND + self.Block + self.Block + self.Command_BFAND
        self.Complex <<= self.ComplexOR | self.ComplexAND

        self.NwProgram = pp.Group(self.Nw + self.Block + self.Command_OUT)

        self.Program = pp.OneOrMore(self.NwProgram)
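
A hypothetical round trip, assuming the instruction-list source format implied by the grammar (the enclosing class name is not shown, so LadParser is a placeholder):

lad = LadParser()  # hypothetical name for the class defined above
src = "NW:1\nLD I01\nOR I02\nOUT O03\n"
print(lad.Program.parseString(src))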
Example #6
def _parse(mystr):

    LBRACE, RBRACE, EQUAL = map(pp.Suppress, "{}=")
    field = pp.Word(pp.printables + ' ', excludeChars='[]=')
    field.addParseAction(pp.tokenMap(str.rstrip))
    string = pp.dblQuotedString().setParseAction(pp.removeQuotes)
    number = pp.pyparsing_common.number()
    date_expr = pp.Regex(r'\d\d\d\d-\d\d-\d\d')
    time_expr = pp.Regex(r'\d\d:\d\d:\d\d\.\d\d\d')
    nan = pp.Keyword('nan')
    scalar_value = (string | date_expr | time_expr | number | nan)

    list_marker = pp.Suppress("[]")
    value_list = pp.Forward()
    jobject = pp.Forward()

    memberDef1 = pp.Group(field + EQUAL + scalar_value)
    memberDef2 = pp.Group(field + EQUAL + jobject)
    memberDef3 = pp.Group(field + list_marker + EQUAL + LBRACE + value_list +
                          RBRACE)
    memberDef = memberDef1 | memberDef2 | memberDef3

    value_list <<= (pp.delimitedList(scalar_value, ",") |
                    pp.ZeroOrMore(pp.Group(pp.Dict(memberDef2))))
    value_list.setParseAction(lambda t: [pp.ParseResults(t[:])])

    members = pp.OneOrMore(memberDef)
    jobject <<= pp.Dict(LBRACE + pp.ZeroOrMore(memberDef) + RBRACE)
    # force empty jobject to be a dict
    jobject.setParseAction(lambda t: t or {})

    parser = members
    parser = pp.OneOrMore(pp.Group(pp.Dict(memberDef)))

    return parser.parseString(mystr)
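
A hypothetical input in the "field = value" shape this grammar describes, with one nested jobject; only the call is shown, since the exact result layout depends on the Dict/Group nesting:

sample = 'name = "widget"\ncount = 3\nmeta = { id = 7 created = 2021-03-05 }'
print(_parse(sample).dump())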
Example #7
    def compute(self, text, verbose=True):

        vbar = Literal("|")
        hlinkCap = vbar + Literal("H")
        hlink = vbar + Literal("h")
        dollar = Literal('$')
        lbrack = Literal("[")
        rbrack = Literal("]")
        exclamationMark = Literal("!")

        numberInt = Word(nums).addParseAction(tokenMap(int))
        #Condition
        conditionParser = (
            dollar + lbrack + exclamationMark +
            OneOrMore(numberInt + Optional(Literal(",").suppress())
                      ).setResultsName("difficulties") +
            SkipTo(dollar).setResultsName("text") +
            Suppress(dollar + rbrack)).addParseAction(self.setCondition)

        # Hyperlink
        hyperlinkParser = (Suppress(hlinkCap) +
                           SkipTo(hlink).setResultsName("link") +
                           Suppress(hlink) +
                           SkipTo(hlink).setResultsName("anchor") +
                           Suppress(hlink)).addParseAction(self.setHyperlink)

        # Parsing layer by layer
        parsingOrder = [conditionParser, hyperlinkParser]
        steps = [text]
        for parser in parsingOrder:
            steps.append(parser.transformString(steps[-1]))
        result = steps[-1]
        return super(EncounterSectionParser, self).compute(result, verbose)
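
The layering above relies on transformString, which rewrites each match with whatever its parse action returns. A self-contained illustration of that mechanism, independent of the game-data callbacks:

from pyparsing import Word, nums, tokenMap

doubled = Word(nums).addParseAction(tokenMap(int), lambda t: str(t[0] * 2))
print(doubled.transformString("values: 2, 10, 21"))  # values: 4, 20, 42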
Example #8
    def _parse_tables(report_str: str) -> Dict[str, str]:
        """Parse the tables from a fitter report

        Keys are the title of the table, values are the table body
        """

        hline = pp.lineStart() + pp.Word("+", "+-") + pp.lineEnd()

        title = (
            pp.lineStart()
            + ";"
            + pp.SkipTo(";")("title").setParseAction(pp.tokenMap(str.strip))
            + ";"
            + pp.lineEnd()
        )

        # Grab everything until the next horizontal line(s). Tables with
        # column headings will have a horizontal line after the headings and
        # at the end of the table. Odd tables without section headings will
        # only have a single horizontal line.
        data = pp.SkipTo(hline, failOn=pp.lineEnd() * 2, include=True)

        table = hline + title + pp.Combine(hline + data * (1, 2))("body")

        # Make line endings significant
        table.setWhitespaceChars(" \t")

        result = {t.title: t.body for t in table.searchString(report_str)}

        return result
Example #9
def _parse(mystr):

    LBRACE, RBRACE, EQUAL = map(pp.Suppress, "{}=")
    field = pp.Word(pp.printables + ' ', excludeChars='[]=')
    field.addParseAction(pp.tokenMap(str.rstrip))
    string = pp.dblQuotedString().setParseAction(pp.removeQuotes)
    number = pp.pyparsing_common.number()
    date_expr = pp.Regex(r'\d\d\d\d-\d\d-\d\d')
    time_expr = pp.Regex(r'\d\d:\d\d:\d\d\.\d\d\d')
    scalar_value = (string | date_expr | time_expr | number)

    list_marker = pp.Suppress("[]")
    value_list = pp.Forward()
    jobject = pp.Forward()

    memberDef1 = pp.Group(field + EQUAL + scalar_value)
    memberDef2 = pp.Group(field + EQUAL + jobject)
    memberDef3 = pp.Group(field + list_marker + EQUAL + LBRACE + value_list +
                          RBRACE)
    memberDef = memberDef1 | memberDef2 | memberDef3

    value_list <<= (pp.delimitedList(scalar_value, ",")
                    | pp.ZeroOrMore(pp.Group(pp.Dict(memberDef2))))
    value_list.setParseAction(lambda t: [pp.ParseResults(t[:])])

    members = pp.OneOrMore(memberDef)
    jobject <<= pp.Dict(LBRACE + pp.ZeroOrMore(memberDef) + RBRACE)
    # force empty jobject to be a dict
    jobject.setParseAction(lambda t: t or {})

    parser = members
    parser = pp.OneOrMore(pp.Group(pp.Dict(memberDef)))

    return parser.parseString(mystr)
Example #10
    def parse_as_create_predictor(self) -> dict:
        CREATE, PREDICTOR, FROM, WHERE, PREDICT, AS, ORDER, GROUP, BY, WINDOW, HORIZON, USING, ASK, DESC = map(
            CaselessKeyword,
            "CREATE PREDICTOR FROM WHERE PREDICT AS ORDER GROUP BY WINDOW HORIZON USING ASK DESC"
            .split())
        ORDER_BY = ORDER + BY
        GROUP_BY = GROUP + BY

        word = Word(alphanums + "_")

        s_int = Word(nums).setParseAction(tokenMap(int))

        predict_item = Group(
            word('name') + Optional(AS.suppress() + word('alias')))

        order_item = Group(word('name') + Optional(ASK | DESC)('sort'))

        using_item = Group(
            word('name') + Word('=').suppress() +
            (word | QuotedString("'"))('value'))

        expr = (
            CREATE + PREDICTOR + word('predictor_name') + FROM +
            Optional(word)('integration_name') +
            originalTextFor(nestedExpr('(', ')'))('select') +
            Optional(AS + word('datasource_name')) + PREDICT +
            delimitedList(predict_item, delim=',')('predict') +
            Optional(ORDER_BY +
                     delimitedList(order_item, delim=',')('order_by')) +
            Optional(GROUP_BY + delimitedList(word, delim=',')('group_by')) +
            Optional(WINDOW + s_int('window')) +
            Optional(HORIZON + s_int('nr_predictions')) + Optional(
                (USING + delimitedList(using_item, delim=',')('using'))
                | (USING + originalTextFor(nestedExpr('{', '}'))('using'))))

        r = expr.parseString(self._sql)

        # postprocessing
        r = r.asDict()
        if r['select'].startswith('(') and r['select'].endswith(')'):
            r['select'] = r['select'][1:-1]
        r['select'] = r['select'].strip(' \n')

        using = r.get('using')
        if isinstance(using, str):
            r['using'] = json.loads(using)
        elif isinstance(using, list):
            new_using = {}
            for el in using:
                if el['name'] == 'stop_training_in_x_seconds':
                    new_using['time_aim'] = el['value']
                else:
                    new_using[el['name']] = el['value']
            r['using'] = new_using

        if isinstance(r.get('order_by'), list):
            r['order_by'] = [x['name'] for x in r['order_by']]

        return r
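
A statement shaped like what this grammar accepts (hypothetical names; the method reads the SQL from self._sql on the surrounding parser object, which is not shown here):

sql = ("CREATE PREDICTOR home_prices FROM my_db (SELECT * FROM homes) "
       "PREDICT price ORDER BY sold_at WINDOW 10 HORIZON 7 "
       "USING stop_training_in_x_seconds = '3600'")
# parse_as_create_predictor() would then return a dict with keys such as
# 'predictor_name', 'select', 'predict', 'window' and 'using', with
# stop_training_in_x_seconds remapped to 'time_aim' by the postprocessing.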
Example #11
    def _parse_map_tables(report_str: str) -> Dict[str, str]:
        """
        Parse the tables from a ISE map report.

        Keys are the title of the table, values are the table body.
        """

        # Capture the title from section headings like:
        #
        # Section 12 - Control Set Information
        # ------------------------------------

        title = (
            pp.lineStart()
            + "Section"
            + ppc.integer
            + "-"
            + pp.SkipTo(pp.lineEnd())("title").setParseAction(pp.tokenMap(str.strip))
            + pp.lineEnd()
        )

        sec_hline = pp.Suppress(pp.lineStart() + pp.Word("-") + pp.lineEnd() * (1,))

        # Table horizontal lines like
        # +-------------------------------+
        hline = pp.lineStart() + pp.Word("+", "+-") + pp.lineEnd()

        # Most tables will have the format
        # +-----------------------+
        # | Col 1 | Col 2 | Col 3 |
        # +-----------------------+
        # | D1    | D2    | D3    |
        # ...
        # +-----------------------+
        #
        # However "Control Set Information" appears to use horizontal lines to
        # separate clocks within the data section. Therefore, just grab
        # everything until a horizontal line followed by a blank line rather
        # than something more precise.

        table = pp.Combine(hline + pp.SkipTo(hline + pp.LineEnd(), include=True))(
            "body"
        )
        table_section = title + sec_hline + table

        # Make line endings significant
        table_section.setWhitespaceChars(" \t")

        result = {t.title: t.body for t in table_section.searchString(report_str)}

        return result
Example #12
 def getLiteral(self):
     '''
     get the literal sub Grammar
     '''
     uri=Regex(SiDIFParser.getUriRegexp())('uri')
     booleanLiteral=oneOf(["true","false"]).setParseAction(self.convertToBoolean)('boolean')
     hexLiteral=(Suppress("0x")+(Word(hexnums).setParseAction(tokenMap(int, 16))))('hexLiteral')
     integerLiteral=pyparsing_common.signed_integer('integerLiteral')
     floatingPointLiteral=Group(
         pyparsing_common.sci_real|pyparsing_common.real
     ).setParseAction(self.handleGroup)('floatingPointLiteral')
     timeLiteral=Regex(r"[0-9]{2}:[0-9]{2}(:[0-9]{2})?").setParseAction(self.convertToTime)('timeLiteral')
     dateLiteral=pyparsing_common.iso8601_date.copy().setParseAction(pyparsing_common.convertToDate())('dateLiteral')
     dateTimeLiteral=Group(
         dateLiteral+Optional(timeLiteral)
     ).setParseAction(self.handleDateTimeLiteral)('dateTimeLiteral')
     stringLiteral=Group(
         Suppress('"')+ZeroOrMore(CharsNotIn('"')|LineEnd())+Suppress('"')
     ).setParseAction(self.handleStringLiteral)('stringLiteral')
     literal=Group(
         uri | stringLiteral |  booleanLiteral | hexLiteral | dateTimeLiteral | timeLiteral | floatingPointLiteral| integerLiteral 
     ).setParseAction(self.handleGroup)("literal")
     return literal
Example #13
def UINTEGER(name):
    """generate UINTEGER parser"""
    return Word(nums).setParseAction(tokenMap(int)).setName(name)
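
For example, assuming the module's "from pyparsing import Word, nums, tokenMap":

port = UINTEGER("port")
print(port.parseString("8080")[0] + 1)  # 8081 -- already an int, not a str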
Example #14
    def _parse_twr_period(timing_str: str) -> pp.ParseResults:
        """Parse period constraints from an ISE timing report

        Expects the default ISE verbose output from a command like: ::
            trce -v 3 -n 3 -fastpaths top.ncd top.pcf -o top.twr
        """
        # Look for a section of the report like the following and extract the
        # constraint, path information, and minimum period.
        #
        # ================================================================================
        # Timing constraint: TS_clk = PERIOD TIMEGRP "clk" 150 MHz HIGH 50%;
        # For more information, see Period Analysis in the Timing Closure User Guide (UG612).
        #
        # 39892 paths analyzed, 3774 endpoints analyzed, 632 failing endpoints
        # 632 timing errors detected. (632 setup errors, 0 hold errors, 0 component switching limit errors)
        # Minimum period is  10.877ns.
        # --------------------------------------------------------------------------------
        #
        # or
        #
        # ================================================================================
        # Timing constraint: TS_soclinux_crg_pll_sdram_half_b = PERIOD TIMEGRP
        # "soclinux_crg_pll_sdram_half_b" TS_soclinux_crg_clk50b / 3.33333333
        # PHASE 4.16666667 ns HIGH 50%;
        # For more information, see Period Analysis in the Timing Closure User Guide (UG612).
        #
        #  0 paths analyzed, 0 endpoints analyzed, 0 failing endpoints
        #  0 timing errors detected. (0 component switching limit errors)
        #  Minimum period is   1.730ns.
        # --------------------------------------------------------------------------------

        period = ppc.real("min period") + pp.Suppress("ns")

        # Build up a case-insensitive match for any of the below units
        units = ["ps", "ns", "micro", "ms", "%", "MHz", "GHz", "kHz"]

        pp_units = pp.CaselessLiteral(units[0])
        for u in units[1:]:
            pp_units |= pp.CaselessLiteral(u)

        hl = pp.Literal("HIGH") | pp.Literal("LOW")
        num = ppc.number + pp.Optional(pp_units)
        jitter = pp.Optional("INPUT_JITTER" + num)

        # Remove leading and trailing whitespace and any line breaks
        #
        # SkipTo in the below timespec parser will pickup whitespace including
        # new lines if they are included in the report.
        def remove_ws_and_newlines(s):
            lines = [l.strip() for l in s.splitlines()]
            return " ".join(lines)

        timespec = (pp.Suppress("Timing constraint:") +
                    pp.Word(pp.printables)("timespec") +
                    pp.Suppress("= PERIOD TIMEGRP") +
                    pp.Word(pp.printables)("timegroup") +
                    pp.SkipTo(hl)("constraint").setParseAction(
                        pp.tokenMap(remove_ws_and_newlines)) +
                    pp.Suppress(hl + num + jitter + ";"))

        # Parse the path information from the report like:
        #
        # 0 paths analyzed, 0 endpoints analyzed, 0 failing endpoints
        # 0 timing errors detected. (0 component switching limit errors)
        #
        # or
        #
        # 266 paths analyzed, 235 endpoints analyzed, 0 failing endpoints
        # 0 timing errors detected. (0 setup errors, 0 hold errors, 0 component switching limit errors)
        stats = (
            ppc.integer("paths") + pp.Suppress("paths analyzed,") +
            ppc.integer("endpoints") + pp.Suppress("endpoints analyzed,") +
            ppc.integer("failing") + pp.Suppress("failing endpoints") +
            ppc.integer("timing errors") +
            pp.Suppress("timing errors detected. (") + pp.Optional(
                ppc.integer("setup errors") + pp.Suppress("setup errors,") +
                ppc.integer("hold errors") + pp.Suppress("hold errors,")) +
            ppc.integer("switching limit errors") +
            pp.Suppress("component switching limit errors)"))

        # It's not clear why this doesn't show up for one timing constraint in
        # the LiteX Linux VexRISCV example
        min_period = pp.Optional(pp.Suppress("Minimum period is") + period)

        constraint = timespec + pp.Suppress(
            pp.SkipTo(stats)) + stats + min_period

        result = constraint.searchString(timing_str)

        return result
Example #15
    # Color (Distance Type)
    "Gr": "00",  # Gray
    "Bl": "01",  # Black
    "Wh": "10",  # White
}


alpha_upper = string.ascii_uppercase

mnemonic = Word(alpha_upper, bodyChars=alpha_upper + nums).setResultsName("mnemonic")

# XXX can't use pyparsing_common.signedInteger as the latest pyparsing 2.1.5
# has a bug which always converts them to floats. Remove this once 2.1.6 is
# published on PyPI.
signed_integer = (
    Regex(r"[+-]?\d+").setName("signed integer").setParseAction(tokenMap(int))
)

variable = Word(alphas, bodyChars=alphanums)

stack_item = Suppress(",") + (signed_integer | Suppress("*") | variable)

flag = oneOf(list(VTT_MNEMONIC_FLAGS.keys()))
# convert flag to binary string
flag.setParseAction(tokenMap(lambda t: VTT_MNEMONIC_FLAGS[t]))
flags = Combine(OneOrMore(flag)).setResultsName("flags")
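# A quick check of the flag-to-binary mapping, using only the keys visible in
# the (truncated) VTT_MNEMONIC_FLAGS excerpt above:
#   flags.parseString("GrBlWh")[0]  ->  '000110'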

delta_point_index = pyparsing_common.integer.setResultsName("point_index")
delta_rel_ppem = pyparsing_common.integer.setResultsName("rel_ppem")
delta_step_no = signed_integer.setResultsName("step_no")
# the step denominator is only used in VTT's DELTA[CP]* instructions,
Example #16
from atcgen.instructions.effect import Effect

from pyparsing import (Combine, Word, nums, tokenMap)

SNAP_ARG = Combine(Word(nums)).setParseAction(tokenMap(int)).setName("value")


class SnapEffect(Effect):
    def __init__(self):
        super(SnapEffect, self).__init__()
        self.full_line = None

    def parse(self, args_str):
        parsed_args = SNAP_ARG.parseString(args_str, parseAll=True)
        return parsed_args

    @staticmethod
    def help(context):
        return (
            "Snap successive syllables with close enough timings.\n\n"
            "Active by default.\n"
            "arguments: VALUE\n\n"
            "+---------------+---------------+-----------------------------+\n"
            "| argument name | argument type | description                 |\n"
            "+===============+===============+=============================+\n"
            "| VALUE         | integer       | Maximum timing difference   |\n"
            "|               |               | to snap                     |\n"
            "+---------------+---------------+-----------------------------+\n"
            '\n'
            "Snapping is done by moving the beginning of the syllable to the "
            "centisecond following the end of the previous syllable.\n\n"
Example #17
    def compute(self, text, verbose=True):

        # Literals
        dollar = Literal('$')
        amper = Literal('&')
        at = Literal('@')
        qm = Literal('?')
        em = Literal('!')
        dot = Literal('.')
        colon = Literal(":")
        vbar = Literal("|")
        lbrack = Literal("[")
        rbrack = Literal("]")
        lcurly = Literal("{")
        rcurly = Literal("}")
        lparen = Literal("(")
        rparen = Literal(")")
        lt = Literal("<")
        gt = Literal(">")
        eq = Literal("=")
        deq = Literal("==")

        # Reusables
        spellId = Word(nums, min=2, max=6).addParseAction(
            tokenMap(int)).setResultsName("spellId")
        idx = Word(nums,
                   max=1).addParseAction(tokenMap(int)).setResultsName("id")
        var = Word(alphas).setResultsName("var")

        # Spell References
        effectId = Optional(
            Word(nums, max=2).addParseAction(
                tokenMap(int)).setResultsName("effectId"))
        references = (dollar.suppress() +
                      ((at.suppress() + var + Optional(spellId)) |
                       (spellId + var + effectId) |
                       (var + effectId))).addParseAction(self.setReferences)

        # Conditions
        brackets = Suppress(lbrack) + SkipTo(rbrack).setResultsName(
            "statement") + Suppress(rbrack)
        value = Word(nums, max=5).addParseAction(
            tokenMap(int)).setResultsName("value")
        conditionVar = Group(
            Optional(em).setResultsName("not") + Optional(var) +
            (spellId | idx)
            | Optional("-") + value
            | Word(alphanums, exact=8).setResultsName("hashVariable"))
        conditions = ((dollar + qm).suppress() + OneOrMore(
            Group(
                Optional(Suppress(qm)) + Optional(Suppress(lparen)) +
                OneOrMore(
                    conditionVar.setResultsName("variables*") +
                    Optional(Combine(em + eq) | amper | vbar | deq | lt
                             | gt).setResultsName("operators*")) +
                Optional(Suppress(rparen)) +
                brackets).setResultsName("conditions*")) +
                      brackets).addParseAction(lambda t: self.setConditions(
                          t, verbose=verbose)) + Optional(dot.suppress())

        # Call Variable
        callVariables = (Suppress((lt + dollar) | (dollar + lt)) +
                         SkipTo(gt).setResultsName("name") +
                         Suppress(gt)).addParseAction(self.callVariables)

        # Expressions
        expressions = (
            Suppress(dollar + lcurly) +
            SkipTo(rcurly).setResultsName("content") + rcurly + Optional(
                dot.suppress() + Word(nums, exact=1).addParseAction(
                    tokenMap(int)).setResultsName("mod"), )
        ).addParseAction(lambda t: self.setExpressions(t, verbose=verbose))

        # Language Choices
        languageChoices = (
            (Literal('$L') | Literal('$l')).suppress() +
            OneOrMore(Word(alphas) + Optional(Literal(":").suppress())
                      ).setResultsName("options*") +
            Literal(';').suppress()).addParseAction(self.setLanguageChoices)

        # Icons
        icons = (Literal("|T").suppress() +
                 SkipTo(colon).setResultsName("path") +
                 colon.suppress() + Word(nums, exact=2).addParseAction(
                     tokenMap(int)).setResultsName("size") +
                 Literal("|t").suppress()).addParseAction(self.setIcons)

        # Parsing layer by layer
        parsingOrder = [
            icons, languageChoices, callVariables, references, expressions,
            conditions
        ]
        steps = [text]
        for parser in parsingOrder:
            steps.append(parser.transformString(steps[-1]))
        result = steps[-1]

        # Replace each SHA-1 hash placeholder with the value it refers to
        if verbose:
            for k, v in self.variables.items():
                result = result.replace(k, str(v))

        # Display fixes
        displayFixes = [["*% of", "% of"], ["power)%", "power)"]]
        for bef, aft in displayFixes:
            result = result.replace(bef, aft)

        return super(SpellDescriptionParser, self).compute(result, verbose)
Example #19
import pyparsing as pp

Integer = pp.Word(pp.nums).setParseAction(pp.tokenMap(int))
Axis = pp.Word("xy")
Range = (Integer + pp.Suppress("..") + Integer)
Value = Integer ^ Range
Constraint = pp.Group(Axis("axis") + pp.Suppress("=") + Value("value"))
Line = Constraint + pp.Suppress(",") + Constraint
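
A quick check of the constraint grammar against the "x=..., y=.." scan-line format it implies:

print(Line.parseString("x=495, y=2..7").asList())
# -> [['x', 495], ['y', 2, 7]]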


class World:
    def __init__(self):
        self.grid = {}
        self.spring = (0, 500)
        self.max_row = None
        self.min_row = None

    def draw(self):
        points = self.grid.keys()
        rows = [p[0] for p in points]
        cols = [p[1] for p in points]
        max_row = max(rows)
        min_col, max_col = min(cols), max(cols)
        for row in range(0, max_row + 1):
            line = ""
            for col in range(min_col, max_col + 1):
                if (row, col) in self.grid:
                    line += self.grid.get((row, col))
                else:
                    line += "."
            print(line)
Example #20
import functools

from pyparsing import (ZeroOrMore, OneOrMore, Word, Suppress, Forward, Group,
                       tokenMap, nums, alphas, Literal)

test_inputs = ["inputs/day19_part2"]

Integer = Word(nums).setParseAction(tokenMap(int))
CharacterExpr = Suppress("\"") + Word(alphas, exact=1) + Suppress("\"")
RuleRefSeq = OneOrMore(Integer, stopOn="\n")
RuleRefExpr = Group(RuleRefSeq) + ZeroOrMore(Suppress("|") + Group(RuleRefSeq))
RuleExpr = CharacterExpr("char") | RuleRefExpr("subrules")
Rule = Integer("index") + Suppress(":") + RuleExpr("value")
Message = Word(alphas)
Line = Rule("rule") | Message("message")


def build_grammar(rules):
    grammar = {}
    for rule in rules:
        grammar[rule["index"]] = Forward()

    for rule in rules:
        index = rule["index"]
        if "char" in rule:
            grammar[index] << (Literal(rule["value"][0]))
        else:
            subrules = rule["subrules"]
            summed_rules = [
                functools.reduce(lambda a, b: a + b,
                                 [grammar[idx] for idx in group])
Example #21
def INTEGER(name):
    """generate INTEGER parser"""
    return Combine(Optional('-') + Word(nums)).setParseAction(
        tokenMap(int)).setName(name)
Example #22
    # Color (Distance Type)
    "Gr": '00',  # Gray
    "Bl": '01',  # Black
    "Wh": '10',  # White
}

alpha_upper = string.ascii_uppercase

mnemonic = Word(alpha_upper,
                bodyChars=alpha_upper + nums).setResultsName("mnemonic")

# XXX can't use pyparsing_common.signedInteger as the latest pyparsing 2.1.5
# has a bug which always converts them to floats. Remove this once 2.1.6 is
# published on PyPI.
signed_integer = Regex(r'[+-]?\d+').setName("signed integer").setParseAction(
    tokenMap(int))

variable = Word(alphas, bodyChars=alphanums)

stack_item = Suppress(",") + (signed_integer | Suppress("*") | variable)

flag = oneOf(list(VTT_MNEMONIC_FLAGS.keys()))
# convert flag to binary string
flag.setParseAction(tokenMap(lambda t: VTT_MNEMONIC_FLAGS[t]))
flags = Combine(OneOrMore(flag)).setResultsName("flags")

delta_point_index = pyparsing_common.integer.setResultsName("point_index")
delta_rel_ppem = pyparsing_common.integer.setResultsName("rel_ppem")
delta_step_no = signed_integer.setResultsName("step_no")
# the step denominator is only used in VTT's DELTA[CP]* instructions,
# and must always be 8 (sic!), so we can suppress it.
Example #23
    def __init__(self):
        ParserElement.enablePackrat()

        unit_years = (CaselessLiteral("years") | CaselessLiteral("year")
                      | CaselessLiteral("y"))
        years = (
            Word(nums).setParseAction(lambda s, l, t: [int(t[0])])("years") +
            unit_years)
        unit_months = (CaselessLiteral("months") | CaselessLiteral("month")
                       | CaselessLiteral("mo"))
        months = (
            Word(nums).setParseAction(lambda s, l, t: [int(t[0])])("months") +
            unit_months)
        unit_weeks = (CaselessLiteral("weeks") | CaselessLiteral("week")
                      | CaselessLiteral("w"))
        weeks = (
            Word(nums).setParseAction(lambda s, l, t: [int(t[0])])("weeks") +
            unit_weeks)
        unit_days = (CaselessLiteral("days") | CaselessLiteral("day")
                     | CaselessLiteral("d"))
        days = (
            Word(nums).setParseAction(lambda s, l, t: [int(t[0])])("days") +
            unit_days)
        unit_hours = (CaselessLiteral("hours")
                      | CaselessLiteral("hour")
                      | CaselessLiteral("hrs")
                      | CaselessLiteral("hr")
                      | CaselessLiteral("h"))
        hours = (
            Word(nums).setParseAction(lambda s, l, t: [int(t[0])])("hours") +
            unit_hours)
        unit_minutes = (CaselessLiteral("minutes")
                        | CaselessLiteral("minute")
                        | CaselessLiteral("mins")
                        | CaselessLiteral("min")
                        | CaselessLiteral("m"))
        minutes = (
            Word(nums).setParseAction(lambda s, l, t: [int(t[0])])("minutes") +
            unit_minutes)
        unit_seconds = (CaselessLiteral("seconds")
                        | CaselessLiteral("second")
                        | CaselessLiteral("secs")
                        | CaselessLiteral("sec")
                        | CaselessLiteral("s"))
        seconds = (
            Word(nums).setParseAction(lambda s, l, t: [int(t[0])])("seconds") +
            unit_seconds)

        time_unit = years | months | weeks | days | hours | minutes | seconds
        time_unit_separators = Optional(Literal(",")) + Optional(
            CaselessLiteral("and"))
        full_time = time_unit + ZeroOrMore(
            Suppress(Optional(time_unit_separators)) + time_unit)

        every_time = Group(CaselessLiteral("every") + full_time)("every")
        in_opt_time = Group(Optional(CaselessLiteral("in")) + full_time)("in")
        in_req_time = Group(CaselessLiteral("in") + full_time)("in")

        reminder_text_capture = SkipTo(every_time | in_req_time
                                       | StringEnd()).setParseAction(
                                           tokenMap(str.strip))
        reminder_text_optional_prefix = Optional(
            Suppress(CaselessLiteral("to")))
        reminder_text = reminder_text_optional_prefix + reminder_text_capture(
            "text")

        in_every_text = in_opt_time + every_time + reminder_text
        every_in_text = every_time + in_req_time + reminder_text
        in_text_every = in_opt_time + reminder_text + every_time
        every_text_in = every_time + reminder_text + in_req_time
        text_in_every = reminder_text + in_req_time + every_time
        text_every_in = reminder_text + every_time + in_req_time

        in_text = in_opt_time + reminder_text
        text_in = reminder_text + in_req_time
        every_text = every_time + reminder_text
        text_every = reminder_text + every_time

        template = (in_every_text
                    | every_in_text
                    | in_text_every
                    | every_text_in
                    | text_in_every
                    | text_every_in
                    | in_text
                    | text_in
                    | every_text
                    | text_every)

        self.parser = template
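
A hypothetical invocation; the enclosing class is not shown, so ReminderParser is a placeholder:

reminders = ReminderParser()  # hypothetical class name
result = reminders.parser.parseString("stretch your legs every 2 hours")
# result["text"]  -> 'stretch your legs'
# result["every"] -> ['every', 2, 'hours']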