# Grammar-definition module (Ruikowa parser combinators) for field/table
# definitions: PrimaryDefList, FieldDefList and TableDef.
# NOTE(review): this chunk is truncated — the trailing `TableDef = AstParser([`
# call is cut off mid-argument-list, so the code below is left byte-identical
# rather than reformatted; recover the full definition before editing.
from Ruikowa.ObjectRegex.Node import Ref, AstParser, SeqParser, LiteralParser, CharParser, MetaInfo, DependentAstParser try: from .etoken import token except: from etoken import token import re namespace = globals() recurSearcher = set() PrimaryDefList = AstParser([ Ref('FieldDef'), SeqParser([LiteralParser(',', name='\',\''), Ref('FieldDef')]) ], name='PrimaryDefList', toIgnore=[{}, {','}]) FieldDefList = AstParser([ SeqParser( [Ref('FieldDef'), SeqParser([LiteralParser('\n', name='\'\n\'')])]), SeqParser([LiteralParser('\n', name='\'\n\'')]) ], name='FieldDefList', toIgnore=[{}, {'\n'}]) TableDef = AstParser([ Ref('Symbol'), LiteralParser('(', name='\'(\''), Ref('PrimaryDefList'), LiteralParser(')', name='\')\''), SeqParser([LiteralParser('\n', name='\'\n\'')]),
# Grammar-definition module (Ruikowa parser combinators) for a small token
# grammar: word/escape/newline/number/bit terminals plus symbol, argument,
# arguments and declaration rules.
# NOTE(review): this chunk is truncated — the trailing `declaration =
# AstParser([` call is cut off mid-argument-list, so the code below is left
# byte-identical rather than reformatted; recover the full definition before
# editing.
from Ruikowa.ObjectRegex.Node import Ref, AstParser, SeqParser, LiteralParser, CharParser, MetaInfo, DependentAstParser from etoken import token import re namespace = globals() recurSearcher = set() word = LiteralParser('[a-zA-Z_][a-z0-9A-Z_]*', name='word', isRegex=True) escape = LiteralParser('\\', name='escape') newline = LiteralParser('\n', name='newline') number = LiteralParser('\d+', name='number', isRegex=True) bit = LiteralParser('0[XxOoBb][\da-fA-F]+', name='bit', isRegex=True) symbol = AstParser( [SeqParser([LiteralParser('~', name='\'~\'')], atmost=1), Ref('word')], name='symbol') argument = AstParser([Ref('expression')], name='argument') arguments = AstParser([ LiteralParser('(', name='\'(\''), SeqParser([ Ref('argument'), SeqParser([LiteralParser(',', name='\',\''), Ref('argument')]) ], atmost=1), LiteralParser(')', name='\')\'') ], name='arguments', toIgnore=[{}, {',', ')', '('}]) declaration = AstParser([ Ref('symbol'), SeqParser([SeqParser([Ref('arguments')], atmost=1)]),
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 19 18:38:03 2017

@author: misakawa
"""
from Ruikowa.ObjectRegex.Node import Ref, AstParser, SeqParser, LiteralParser, CharParser, MetaInfo
import re

# Tokenizer: splits the input into the three tokens 't', ')' and '('.
token = re.compile("t|\)|\(").findall

namespace = globals()
recurSearcher = set()

# Terminal rule matching the literal token 't'.
type = LiteralParser('t', name='type')

# Left-recursive rule:
#   prefix ::= prefix '(' prefix* ')'
#            | type
prefix = AstParser(
    [Ref('prefix'),
     LiteralParser('(', name='LP'),
     SeqParser([Ref('prefix')]),
     LiteralParser(')', name='RP')],
    [Ref('type')],
    name='prefix')

# Resolve the Ref('...') placeholders against this module's globals.
prefix.compile(namespace, recurSearcher)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 17 20:03:08 2017

@author: misakawa
"""
from Ruikowa.ObjectRegex.Node import Ast, Ref, LiteralParser, CharParser, SeqParser, AstParser
from Ruikowa.ObjectRegex.MetaInfo import MetaInfo
from Ruikowa.Core.BaseDef import Trace

# Smoke tests for the basic token parsers: CharParser consumes single
# tokens, LiteralParser matches a whole token string.
inputs = ['a', '\n', 'abc']

charParser1 = CharParser('a')
charParser2 = CharParser('\n')
litParser = LiteralParser.RawFormDealer(rawStr='abc', name='ABC')

meta = MetaInfo()
# BUGFIX: the original compared match results against string literals with
# `is`, which tests object identity and only "works" via CPython string
# interning (a SyntaxWarning on modern CPython). Compare by value with `==`;
# `is` is kept only for the None check, where identity is correct.
assert charParser1.match(inputs, meta) == 'a'
assert litParser.match(inputs, meta) is None
assert charParser2.match(inputs, meta) == '\n'
assert litParser.match(inputs, meta) == 'abc'
from Ruikowa.ObjectRegex.Node import Ref, AstParser, SeqParser, LiteralParser, CharParser, MetaInfo, DependentAstParser

try:
    from .etoken import token
except:
    from etoken import token

import re

namespace = globals()
recurSearcher = set()

# Generic ::= Identifier '<' [ Type (',' Type)* ] '>'
Generic = AstParser(
    [Ref('Identifier'),
     LiteralParser('<', name='\'<\''),
     SeqParser([Ref('Type'),
                SeqParser([LiteralParser(',', name='\',\''),
                           Ref('Type')])],
               atmost=1),
     LiteralParser('>', name='\'>\'')],
    name='Generic',
    toIgnore=[{}, {',', '<', '>'}])

# A C-style identifier token.
Identifier = LiteralParser('[a-zA-Z_][a-z0-9A-Z_]*', name='Identifier', isRegex=True)

# Type ::= Generic ['?']
#        | Identifier ['?']
Type = AstParser(
    [Ref('Generic'),
     SeqParser([LiteralParser('?', name='\'?\'')], atmost=1)],
    [Ref('Identifier'),
     SeqParser([LiteralParser('?', name='\'?\'')], atmost=1)],
    name='Type')

# Resolve the Ref('...') placeholders against this module's globals.
Generic.compile(namespace, recurSearcher)
Type.compile(namespace, recurSearcher)
from Ruikowa.ObjectRegex.Node import Ref, AstParser, SeqParser, LiteralParser, CharParser, MetaInfo, DependentAstParser
from etoken import token

import re

namespace = globals()
recurSearcher = set()

# Number ::= ['-'] digits [ '.' digits ] [ 'E' ['-'] digits ]
Number = AstParser(
    [SeqParser([LiteralParser('-', name='\'-\'')], atmost=1),
     LiteralParser('\d+', name='\'\d+\'', isRegex=True),
     SeqParser([CharParser('.', name='\'.\''),
                LiteralParser('\d+', name='\'\d+\'', isRegex=True)],
               atmost=1),
     SeqParser([CharParser('E', name='\'E\''),
                SeqParser([LiteralParser('-', name='\'-\'')], atmost=1),
                LiteralParser('\d+', name='\'\d+\'', isRegex=True)],
               atmost=1)],
    name='Number')

# Data ::= Number (',' Number)* NEWLINE*   (commas/newlines dropped from AST)
Data = AstParser(
    [Ref('Number'),
     SeqParser([CharParser(',', name='\',\''), Ref('Number')]),
     SeqParser([LiteralParser('\n', name='\'\n\'')])],
    name='Data',
    toIgnore=[{}, {'\n', ','}])

# DataSets ::= Data*
DataSets = AstParser([SeqParser([Ref('Data')])], name='DataSets')

# Resolve the Ref('...') placeholders against this module's globals.
Number.compile(namespace, recurSearcher)
Data.compile(namespace, recurSearcher)
DataSets.compile(namespace, recurSearcher)
from Ruikowa.ObjectRegex.Node import Ast, Ref, LiteralParser, CharParser, SeqParser, AstParser
from Ruikowa.ObjectRegex.MetaInfo import MetaInfo
from Ruikowa.Core.BaseDef import Trace

# Smoke tests for the basic token parsers.
inputs = ['a', '\n', 'abc']
charParser1 = CharParser('a')
charParser2 = CharParser('\n')
litParser = LiteralParser.RawFormDealer(rawStr='abc', name='ABC')
meta = MetaInfo()
# BUGFIX: compare match results by value (`==`), not identity (`is`);
# `is 'a'` relies on CPython string interning, an implementation detail
# (SyntaxWarning on modern CPython). `is None` is kept — identity is
# correct for None.
assert charParser1.match(inputs, meta) == 'a'
assert litParser.match(inputs, meta) is None
assert charParser2.match(inputs, meta) == '\n'
assert litParser.match(inputs, meta) == 'abc'

# Mutually recursive grammar:  ASeq ::= U 'd' | 'a' ;  U ::= ASeq 'c'
a = LiteralParser('a', name='a')
c = LiteralParser('c', name='c')
d = LiteralParser('d', name='d')
ASeq = AstParser([Ref('U'), d], [a], name='ASeq')
U = AstParser([Ref('ASeq'), c], name='U')
namespace = globals()
seset = set()
ASeq.compile(namespace, seset)
x = MetaInfo()
print(ASeq.match(['a', 'c', 'd', 'c', 'd', 'k'], x))

# Left-recursive variant:  ASeq ::= ASeq 'd' | 'a'
# (rebinds the module-level names above; the variant is not compiled here)
a = LiteralParser('a', name='a')
c = LiteralParser('c', name='c')
d = LiteralParser('d', name='d')
ASeq = AstParser([Ref('ASeq'), d], [a], name='ASeq')
#U = AstParser([Ref('ASeq'), c], name = 'U')
namespace = globals()
seset = set()
# Grammar-definition module (Ruikowa parser combinators) for Python-like
# `def` headers: Def, NameIg and param rules.
# NOTE(review): this chunk is truncated — the trailing `param = AstParser([`
# call is cut off mid-argument-list, so the code below is left byte-identical
# rather than reformatted; recover the full definition before editing.
from Ruikowa.ObjectRegex.Node import Ref, AstParser, SeqParser, LiteralParser, CharParser, MetaInfo, DependentAstParser try: from .etoken import token except: from etoken import token import re namespace = globals() recurSearcher = set() Def = AstParser([ LiteralParser('def', name='\'def\''), Ref('Name'), CharParser('(', name='\'(\''), Ref('paramList'), CharParser(')', name='\')\'') ], name='Def', toIgnore=[{}, {'(', 'def', ':', ')'}]) NameIg = AstParser([ LiteralParser('[a-zA-Z_][a-zA-Z_0-9]*', name='\'[a-zA-Z_][a-zA-Z_0-9]*\'', isRegex=True) ], name='NameIg') param = AstParser([ SeqParser([CharParser('*', name='\'*\'')], atmost=1), Ref('Name'), SeqParser([CharParser(':', name='\':\''), Ref('NameIg')], atmost=1), SeqParser([ CharParser('=', name='\'=\''), LiteralParser('None', name='\'None\'')
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 17 20:06:10 2017

@author: misakawa
"""
from Ruikowa.ObjectRegex.Node import Ast, Ref, LiteralParser, CharParser, SeqParser, AstParser
from Ruikowa.ObjectRegex.MetaInfo import MetaInfo
from Ruikowa.Core.BaseDef import Trace

# Terminal parsers.
a = LiteralParser('a', name='a')
c = LiteralParser('c', name='c')
d = LiteralParser('d', name='d')

# Left-recursive grammar:  ASeq ::= ASeq 'd' | 'a'
ASeq = AstParser([Ref('ASeq'), d], [a], name='ASeq')
#U = AstParser([Ref('ASeq'), c], name = 'U')

namespace = globals()
seset = set()
# Resolve Ref('ASeq') against this module's globals.
ASeq.compile(namespace, seset)

x = MetaInfo()
print(ASeq.match(['a', 'd', 'd', 'd', 'd', 'd'], x))
from Ruikowa.ObjectRegex.Node import Ref, AstParser, SeqParser, LiteralParser, CharParser, MetaInfo, DependentAstParser

try:
    from .etoken import token
except:
    from etoken import token

import re

namespace = globals()
recurSearcher = set()

# Atom: one or more characters that are not whitespace, parens or backquote.
Atom = LiteralParser('[^\(\)\s\`]+', name='Atom', isRegex=True)

# Expr ::= Atom
#        | Quote
#        | '(' ( NEWLINE* Expr* NEWLINE* )* ')'      (newlines ignored in AST)
Expr = AstParser(
    [Ref('Atom')],
    [Ref('Quote')],
    [CharParser('(', name='\'(\''),
     SeqParser([SeqParser([Ref('NEWLINE')]),
                SeqParser([Ref('Expr')]),
                SeqParser([Ref('NEWLINE')])]),
     CharParser(')', name='\')\'')],
    name='Expr',
    toIgnore=[{}, {'\n'}])

# Quote ::= '`' Expr
Quote = AstParser([CharParser('`', name='\'`\''), Ref('Expr')], name='Quote')

NEWLINE = CharParser('\n', name='NEWLINE')

# Stmt ::= ( NEWLINE* Expr* NEWLINE* )*             (newlines ignored in AST)
Stmt = AstParser(
    [SeqParser([SeqParser([Ref('NEWLINE')]),
                SeqParser([Ref('Expr')]),
                SeqParser([Ref('NEWLINE')])])],
    name='Stmt',
    toIgnore=[{}, {'\n'}])
from Ruikowa.ObjectRegex.Node import Ref, AstParser, SeqParser, LiteralParser, CharParser, MetaInfo, DependentAstParser

try:
    from .etoken import token
except:
    from etoken import token

import re

namespace = globals()
recurSearcher = set()

# NOTE(review): the pattern below looks like a regex (negative lookahead for
# '/*', '*/' and newline) but is declared with isRegex=False — confirm this
# is intentional.
Any = LiteralParser('^((?!/\*|\*/|\n)[\s\S])*$', name='Any', isRegex=False)

# multilineComment ::= '/*' ( Any | NEWLINE | multilineComment )* '*/'
# (self-reference allows nested comments)
multilineComment = AstParser(
    [LiteralParser('/*', name='\'/*\''),
     SeqParser([DependentAstParser([Ref('Any')],
                                   [Ref('NEWLINE')],
                                   [Ref('multilineComment')])]),
     LiteralParser('*/', name='\'*/\'')],
    name='multilineComment')

# Single-line comment: '//' to end of line.
Comment = LiteralParser('//[^\n]*', name='Comment', isRegex=True)
# String literal, optionally preceded by lowercase prefix letters.
String = LiteralParser('[a-z]*"[\w|\W]*"', name='String', isRegex=True)
# Hex/octal/binary integer literal.
numberLiteral = LiteralParser('0[XxOoBb][\da-fA-F]+', name='numberLiteral', isRegex=True)
# Decimal literal with optional fraction and exponent parts.
Decimal = LiteralParser('\d+(?:\.\d+|)(?:E\-{0,1}\d+|)', name='Decimal', isRegex=True)
# NOTE(review): alternation-style pattern with isRegex=False — confirm
# this is intentional.
Constant = LiteralParser('null|false|true', name='Constant', isRegex=False)
NEWLINE = LiteralParser('\n', name='NEWLINE', isRegex=True)
EOL = LiteralParser(';', name='EOL', isRegex=True)

# I: ignorable tokens — newline, single-line comment, or multi-line comment.
I = AstParser([Ref('NEWLINE')], [Ref('Comment')], [Ref('multilineComment')], name='I')

simpleName = LiteralParser('[a-zA-Z_][a-z0-9A-Z_]*', name='simpleName', isRegex=True)

# Identifier ::= simpleName | '`' simpleName '`'
Identifier = AstParser(
    [Ref('simpleName')],
    [LiteralParser('`', name='\'`\''), Ref('simpleName'), LiteralParser('`', name='\'`\'')],
    name='Identifier')

# labelDeclaration ::= ':' Identifier
labelDeclaration = AstParser(
    [LiteralParser(':', name='\':\''), Ref('Identifier')],
    name='labelDeclaration')

# block ::= '{' [ variableDeclarationEntryList [ ':' Type | '=>' Type ] '->' ]
#           statements '}'
block = AstParser(
    [LiteralParser('{', name='\'{\''),
     SeqParser([Ref('variableDeclarationEntryList'),
                SeqParser([LiteralParser(':', name='\':\''), Ref('Type')],
                          [LiteralParser('=>', name='\'=>\''), Ref('Type')],
                          atmost=1),
                LiteralParser('->', name='\'->\'')],
               atmost=1),
     Ref('statements'),
     LiteralParser('}', name='\'}\'')],
    name='block')

# body ::= block | statement
body = AstParser([Ref('block')], [Ref('statement')], name='body')

# module ::= simpleName (',' simpleName)*
module = AstParser(
    [Ref('simpleName'),
     SeqParser([LiteralParser(',', name='\',\''), Ref('simpleName')])],
    name='module')

# moduleDeclaration ::= 'module' module
moduleDeclaration = AstParser(
    [LiteralParser('module', name='\'module\''), Ref('module')],
    name='moduleDeclaration')

# Import ::= 'import' module
Import = AstParser(
    [LiteralParser('import', name='\'import\''), Ref('module')],
    name='Import')

# statement ::= ( flowControl | declaration
#               | flowControlSign [Identifier] | expression ) [EOL]
statement = AstParser(
    [DependentAstParser([Ref('flowControl')],
                        [Ref('declaration')],
                        [Ref('flowControlSign'),
                         SeqParser([Ref('Identifier')], atmost=1)],
                        [Ref('expression')]),
     SeqParser([Ref('EOL')], atmost=1)],
    name='statement')

# NOTE(review): alternation-style pattern with isRegex=False — confirm
# this is intentional.
flowControlSign = LiteralParser('break|return|continue', name='flowControlSign', isRegex=False)

# flowControl ::= If | While
flowControl = AstParser([Ref('If')], [Ref('While')], name='flowControl')

# If ::= 'if' '(' expression ')' body [ 'else' body ]
If = AstParser(
    [LiteralParser('if', name='\'if\''),
     LiteralParser('(', name='\'(\''),
     Ref('expression'),
     LiteralParser(')', name='\')\''),
     Ref('body'),
     SeqParser([LiteralParser('else', name='\'else\''), Ref('body')], atmost=1)],
    name='If')