def _prepare_sql_parser():
    """Build a SQL-like parser for the conditions used in filters.

    Returns
    -------
    simple_sql : pyparsing.ParserElement
        Parser object responsible for parsing filter condition strings.
    """
    # define SQL tokens
    select_stmt = Forward()
    # NIN ("not in") is declared for future use; only IN is wired into the
    # grammar below.
    AND, OR, IN, NIN = map(CaselessKeyword, "and or in nin".split())

    ident = Word(alphas, alphanums + "_$").setName("identifier")
    column_name = delimitedList(ident, ".", combine=True).setName("column name")
    # FIX: the original called column_name.addParseAction() with *no*
    # arguments, which is a no-op in pyparsing; the dead call is removed.
    # (Sibling grammars in this codebase pass ppc.upcaseTokens here —
    # confirm whether identifier upcasing was intended before adding it.)

    binop = oneOf("= != > < <= >=", caseless=True)
    real_num = ppc.real()
    int_num = ppc.signed_integer()
    # need to add support for alg expressions
    column_rval = (real_num | int_num | quotedString | column_name)

    # A condition is either <col> <op> <rval>, <col> IN (<values...>) or
    # <col> IN (<sub-expression>) — select_stmt recurses via the Forward.
    where_condition = Group(
        (column_name + binop + column_rval)
        | (column_name + IN + Group("(" + delimitedList(column_rval) + ")"))
        | (column_name + IN + Group("(" + select_stmt + ")")))

    # AND binds tighter than OR (listed first in the precedence table).
    where_expression = infixNotation(
        where_condition,
        [
            (AND, 2, opAssoc.LEFT),
            (OR, 2, opAssoc.LEFT),
        ],
    )

    # define the grammar
    select_stmt <<= where_expression
    simple_sql = select_stmt

    # define Oracle comment format, and ignore them
    oracle_sql_comment = "--" + restOfLine
    simple_sql.ignore(oracle_sql_comment)

    return simple_sql
# Classic pyparsing "simpleSQL"-style grammar: SELECT <columns> FROM <tables>
# [WHERE <expression>].  All names below are module-level and referenced by
# the statements that follow, so definition order matters.
selectStmt = Forward()  # forward-declared so WHERE ... IN (<subselect>) can recurse
SELECT, FROM, WHERE = map(CaselessKeyword, "select from where".split())

ident = Word(alphas, alphanums + "_$").setName("identifier")
# Dotted names such as schema.table.column are combined into a single token
# and upper-cased by the parse action.
columnName = delimitedList(ident, ".", combine=True).setName("column name")
columnName.addParseAction(pyparsing_common.upcaseTokens)
columnNameList = Group(delimitedList(columnName))
tableName = delimitedList(ident, ".", combine=True).setName("table name")
tableName.addParseAction(pyparsing_common.upcaseTokens)
tableNameList = Group(delimitedList(tableName))

whereExpression = Forward()  # forward-declared for the parenthesized alternative
and_, or_, in_ = map(CaselessKeyword, "and or in".split())

# Both symbolic and word-form comparison operators are accepted.
binop = oneOf("= != < > >= <= eq ne lt le gt ge", caseless=True)
realNum = pyparsing_common.real()
intNum = pyparsing_common.signed_integer()
columnRval = realNum | intNum | quotedString | columnName  # need to add support for alg expressions

# A condition: comparison, IN (<values>), IN (<subselect>), or a
# parenthesized sub-expression.
whereCondition = Group(
    (columnName + binop + columnRval)
    | (columnName + in_ + "(" + delimitedList(columnRval) + ")")
    | (columnName + in_ + "(" + selectStmt + ")")
    | ("(" + whereExpression + ")")
)
# Right-recursive chain: <condition> [(and|or) <expression>] ...
whereExpression << whereCondition + ZeroOrMore((and_ | or_) + whereExpression)

# define the grammar
selectStmt <<= (SELECT + ('*' | columnNameList)("columns")
                + FROM + tableNameList("tables")
                + Optional(Group(WHERE + whereExpression), "")("where"))
from pyparsing import (alphanums, alphas, CaselessKeyword, delimitedList,
                       Group, infixNotation, oneOf, opAssoc,
                       pyparsing_common as ppc, quotedString, Word)

_logger = logging.getLogger(__name__)  # NOTE(review): `logging` must be imported elsewhere in this file

# SQL WHERE-clause keywords.
AND, OR, IN, IS, NOT, NULL, BETWEEN = map(
    CaselessKeyword, "and or in is not null between".split())
NOT_NULL = NOT + NULL  # matches the two-word phrase "NOT NULL"

ident = Word(alphas, alphanums + "_$").setName("identifier")
# Dotted column paths (a.b.c) are combined into one token.
columnName = delimitedList(ident, ".", combine=True).setName("column name")

# caseless=False: the symbolic operators are unaffected, but the word forms
# eq/ne/lt/le/gt/ge are matched case-sensitively here.
binop = oneOf("= == != < > >= <= eq ne lt le gt ge <>", caseless=False)
realNum = ppc.real()
intNum = ppc.signed_integer()
columnRval = (realNum | intNum | quotedString | columnName)  # need to add support for alg expressions

# Conditions: comparison, IN (<values>), IS [NOT] NULL, or BETWEEN x AND y.
whereCondition = Group(
    (columnName + binop + columnRval)
    | (columnName + IN + Group("(" + delimitedList(columnRval) + ")"))
    | (columnName + IS + (NULL | NOT_NULL))
    | (columnName + BETWEEN + columnRval + AND + columnRval))

# NOTE(review): the expression below is truncated in this view; it continues
# past this chunk of the file.
whereExpression = infixNotation(
    Group(whereCondition | NOT + whereCondition
def create_sql(self):
    """Build a pyparsing grammar for a restricted SELECT statement.

    The grammar recognizes ``SELECT <aggregates|columns> FROM <tables>
    WHERE <sketch predicates> [AND <other predicates>]``, splitting the
    WHERE clause into AND-only "sketch" predicates (equi-joins and
    equality predicates) followed by optionally ignored general
    predicates.

    Returns
    -------
    pyparsing.ParserElement
        The ``simpleSQL`` parser, with Oracle-style ``--`` comments ignored.
    """
    selectStmt = Forward()  # forward-declared so IN (<subselect>) can recurse
    SELECT, FROM, WHERE, AND, OR, IN, IS, NOT, NULL, COUNT, AVG, MIN, MAX, SUM, AS = map(
        CaselessKeyword,
        "select from where and or in is not null count avg min max sum as".
        split())
    NOT_NULL = NOT + NULL  # matches the two-word phrase "NOT NULL"

    ident = Word(alphas, alphanums + "_$").setName("identifier")
    # Dotted names are combined into single upper-cased tokens.
    alias = delimitedList(ident, ".", combine=True).setName("alias")
    alias.addParseAction(ppc.upcaseTokens)
    columnName = delimitedList(ident, ".", combine=True).setName("column name")
    columnName.addParseAction(ppc.upcaseTokens)
    columnNameList = Group(delimitedList(columnName))
    tableName = delimitedList(ident, ".", combine=True).setName("table name")
    tableName.addParseAction(ppc.upcaseTokens)
    # A table reference with an optional "AS alias" part.
    tableNameRalias = Group((tableName("table") + AS + alias("alias"))
                            | (tableName("table")))
    tableNameList = Group(delimitedList(tableNameRalias))

    binop = oneOf("= != < > >= <= eq ne lt le gt ge", caseless=True)
    realNum = ppc.real()
    intNum = ppc.signed_integer()
    columnRval = realNum | intNum | quotedString | columnName  # need to add support for alg expressions
    # NOTE(review): `val` and `columnRstar` are defined but not used below.
    val = realNum | intNum | quotedString
    columnRstar = '*' | columnName  # need to add support for alg expressions

    # col1 = col2 (join condition) vs. col1 = <literal> (selection).
    EquiJoin = (columnName('col1') + '=' + columnName('col2'))
    equalityPredicate = columnName('col1') + '=' + columnRval('val')
    # Any other predicate form: comparison, IN (<values>), IN (<subselect>),
    # IS [NOT] NULL.
    Predicates = Group((columnName('col1') + binop + columnRval)
                       | (columnName('col1') + IN +
                          Group("(" + delimitedList(columnRval) + ")"))
                       | (columnName('col1') + IN +
                          Group("(" + selectStmt + ")"))
                       | (columnName + IS + (NULL | NOT_NULL)))
    whereCondition = Group(
        EquiJoin('equijoin')
        | equalityPredicate('equalitypredicate')
        | Predicates('otherPredicates'))
    # The "sketch" subset only allows joins and equality predicates.
    whereCondition_sketch = Group(
        EquiJoin('equijoin') | equalityPredicate('equalitypredicate'))

    # Aggregate functions: FUNC(column) or COUNT(*).
    Aggregates = Group(((COUNT | AVG | MIN | MAX | SUM)("operator") +
                        (Group("(" + columnName + ")"))("operand"))
                       | (COUNT("operator") +
                          Group("(" + "*" + ")")("operand")))
    AggregateExpression = delimitedList(Aggregates)

    # Sketch predicates may only be AND-combined; full predicates support
    # NOT/AND/OR with the usual precedence.
    whereExpression_predicates = infixNotation(whereCondition_sketch, [
        (AND, 2, opAssoc.LEFT),
    ])
    whereExpression = infixNotation(whereCondition, [
        (NOT, 1, opAssoc.RIGHT),
        (AND, 2, opAssoc.LEFT),
        (OR, 2, opAssoc.LEFT),
    ])

    # define the grammar
    selectStmt <<= (
        SELECT +
        ((AggregateExpression)("aggregates") |
         ('*' | columnNameList)("columns")) +
        FROM + tableNameList("tables") +
        WHERE + (whereExpression_predicates)("sketch_predicates") +
        Optional(AND + (whereExpression)("ignored_predicates")))
    simpleSQL = selectStmt

    # define Oracle comment format, and ignore them
    oracleSqlComment = "--" + restOfLine
    simpleSQL.ignore(oracleSqlComment)

    return simpleSQL
def _parse_twr_period(timing_str: str) -> pp.ParseResults:
    """Parse period constraints from an ISE timing report

    Expects the default ISE verbose output from a command like: ::

        trce -v 3 -n 3 -fastpaths top.ncd top.pcf -o top.twr
    """
    # Look for a section of the report like the following and extract the
    # constraint, path information, and minimum period.
    #
    # ================================================================================
    # Timing constraint: TS_clk = PERIOD TIMEGRP "clk" 150 MHz HIGH 50%;
    # For more information, see Period Analysis in the Timing Closure User Guide (UG612).
    #
    #  39892 paths analyzed, 3774 endpoints analyzed, 632 failing endpoints
    #  632 timing errors detected. (632 setup errors, 0 hold errors, 0 component switching limit errors)
    #  Minimum period is  10.877ns.
    # --------------------------------------------------------------------------------
    #
    # or
    #
    # ================================================================================
    # Timing constraint: TS_soclinux_crg_pll_sdram_half_b = PERIOD TIMEGRP
    # "soclinux_crg_pll_sdram_half_b" TS_soclinux_crg_clk50b / 3.33333333
    # PHASE 4.16666667 ns HIGH 50%;
    # For more information, see Period Analysis in the Timing Closure User Guide (UG612).
    #
    #  0 paths analyzed, 0 endpoints analyzed, 0 failing endpoints
    #  0 timing errors detected. (0 component switching limit errors)
    #  Minimum period is   1.730ns.
    # --------------------------------------------------------------------------------

    # "10.877ns." -> 10.877 under the results name "min period".
    period = ppc.real("min period") + pp.Suppress("ns")

    # Build up a case-insensitive match for any of the below units
    units = ["ps", "ns", "micro", "ms", "%", "MHz", "GHz", "kHz"]
    pp_units = pp.CaselessLiteral(units[0])
    for u in units[1:]:
        pp_units |= pp.CaselessLiteral(u)

    hl = pp.Literal("HIGH") | pp.Literal("LOW")
    num = ppc.number + pp.Optional(pp_units)
    jitter = pp.Optional("INPUT_JITTER" + num)

    # Remove leading and trailing whitespace and any line breaks
    #
    # SkipTo in the below timespec parser will pickup whitespace including
    # new lines if they are included in the report.
    def remove_ws_and_newlines(s):
        lines = [l.strip() for l in s.splitlines()]
        return " ".join(lines)

    # "Timing constraint: <name> = PERIOD TIMEGRP <group> <constraint> HIGH/LOW ...;"
    timespec = (pp.Suppress("Timing constraint:") +
                pp.Word(pp.printables)("timespec") +
                pp.Suppress("= PERIOD TIMEGRP") +
                pp.Word(pp.printables)("timegroup") +
                pp.SkipTo(hl)("constraint").setParseAction(
                    pp.tokenMap(remove_ws_and_newlines)) +
                pp.Suppress(hl + num + jitter + ";"))

    # Parse the path information from the report like:
    #
    #  0 paths analyzed, 0 endpoints analyzed, 0 failing endpoints
    #  0 timing errors detected. (0 component switching limit errors)
    #
    # or
    #
    #  266 paths analyzed, 235 endpoints analyzed, 0 failing endpoints
    #  0 timing errors detected. (0 setup errors, 0 hold errors, 0 component switching limit errors)
    #
    # The setup/hold breakdown inside the parentheses is optional.
    stats = (
        ppc.integer("paths") + pp.Suppress("paths analyzed,") +
        ppc.integer("endpoints") + pp.Suppress("endpoints analyzed,") +
        ppc.integer("failing") + pp.Suppress("failing endpoints") +
        ppc.integer("timing errors") +
        pp.Suppress("timing errors detected. (") +
        pp.Optional(
            ppc.integer("setup errors") + pp.Suppress("setup errors,") +
            ppc.integer("hold errors") + pp.Suppress("hold errors,")) +
        ppc.integer("switching limit errors") +
        pp.Suppress("component switching limit errors)"))

    # It's not clear why this doesn't show up for one timing constraint in
    # the LiteX Linux VexRISCV example
    min_period = pp.Optional(pp.Suppress("Minimum period is") + period)

    # Skip the boilerplate between the constraint line and the statistics.
    constraint = timespec + pp.Suppress(
        pp.SkipTo(stats)) + stats + min_period

    # searchString scans the whole report and returns one result per match.
    result = constraint.searchString(timing_str)

    return result
# simpleSQL-style grammar with IS [NOT] NULL support.  Each element carries a
# setName() so that pyparsing error messages and diagrams are readable.
selectStmt = Forward()  # forward-declared so IN (<subselect>) can recurse
SELECT, FROM, WHERE, AND, OR, IN, IS, NOT, NULL = map(
    CaselessKeyword, "select from where and or in is not null".split())
NOT_NULL = NOT + NULL  # matches the two-word phrase "NOT NULL"

ident = Word(alphas, alphanums + "_$").setName("identifier")
# Dotted names are combined into single upper-cased tokens.
columnName = delimitedList(ident, ".", combine=True).setName("column name")
columnName.addParseAction(ppc.upcaseTokens)
columnNameList = Group(delimitedList(columnName).setName("column_list"))
tableName = delimitedList(ident, ".", combine=True).setName("table name")
tableName.addParseAction(ppc.upcaseTokens)
tableNameList = Group(delimitedList(tableName).setName("table_list"))

binop = oneOf("= != < > >= <= eq ne lt le gt ge",
              caseless=True).setName("binop")
realNum = ppc.real().setName("real number")
intNum = ppc.signed_integer()
columnRval = (realNum | intNum | quotedString | columnName).setName(
    "column_rvalue")  # need to add support for alg expressions

# Conditions: comparison, IN (<values>), IN (<subselect>), IS [NOT] NULL.
whereCondition = Group(
    (columnName + binop + columnRval)
    | (columnName + IN + Group(
        "(" + delimitedList(columnRval).setName("in_values_list") + ")"))
    | (columnName + IN + Group("(" + selectStmt + ")"))
    | (columnName + IS + (NULL | NOT_NULL))).setName("where_condition")

# NOTE(review): the expression below is truncated in this view; it continues
# past this chunk of the file.
whereExpression = infixNotation(
    whereCondition,
    [
        (NOT, 1, opAssoc.RIGHT),
def create(cls, base_shader_path, base_texture_path):
    """
    Create a Stanford polygon file parser (PLY).

    Builds the pyparsing grammar for a PLY header (format line, comments,
    vertex/face element declarations with their properties) and returns a
    parser instance constructed from it.

    :param base_shader_path: base path used to resolve shader files named
        in header comments
    :param base_texture_path: base path used to resolve texture files named
        in header comments
    :return: a new instance of this class wrapping the header grammar
    """
    # Define the base patterns for parsing
    real = pyparsing_common.real()
    integer = pyparsing_common.integer()

    # Define how the header portion begins and ends
    start_keyword = cls._or(cls.begin_header_keyword, suppress=True)
    stop_keyword = cls._or(cls.end_header_keyword, suppress=True)

    # Define the grammar of a comment statement.  Special comments name the
    # shader/texture files; anything else falls through to other_comment.
    comment_keyword = cls._or(cls.comment_keyword, suppress=True)
    vertex_shader_comment = Group(
        comment_keyword + Suppress(CaselessKeyword("VertexShaderFile")) +
        Word(alphanums + ".-_"))("vertex_shader_file")
    fragment_shader_comment = Group(
        comment_keyword + Suppress(CaselessKeyword("FragmentShaderFile")) +
        Word(alphanums + ".-_"))("fragment_shader_file")
    texture_comment = Group(comment_keyword +
                            Suppress(CaselessKeyword("TextureFile")) +
                            Word(alphanums + ".-_"))("texture_file")
    other_comment = comment_keyword + NotAny("TextureFile") + Word(
        printables + " ")

    # Define the grammar of a format statement
    format_keyword = cls._or(cls.format_keyword, suppress=True)
    format_type = cls._or(cls.format_type_map)
    format_expr = Group(format_keyword + format_type("file_type") +
                        real("version"))("format")

    # Define the grammar of properties.  psp = "property scalar prefix":
    # the "property <type>" part shared by every scalar property rule.
    property_keyword = cls._or(cls.property_keyword, suppress=True)
    list_keyword = cls._or(cls.list_keyword, suppress=True)
    property_type = cls._or(cls.data_type_map)
    psp = property_keyword + property_type("data_type")

    # Vertex position: property <type> x / y / z
    position_keywords = [cls._or(k) for k in ("x", "y", "z")]
    property_position = cls._aggregate_property("position", psp,
                                                *position_keywords)

    # Vertex color: r/g/b with optional alpha; long and short keyword forms.
    property_color = Group(
        And([
            Group(psp + MatchFirst((CaselessKeyword("r"),
                                    CaselessKeyword("red")))("name")),
            Group(psp + MatchFirst((CaselessKeyword("g"),
                                    CaselessKeyword("green")))("name")),
            Group(psp + MatchFirst((CaselessKeyword("b"),
                                    CaselessKeyword("blue")))("name")),
            Optional(
                Group(psp + MatchFirst((CaselessKeyword("a"),
                                        CaselessKeyword("alpha")))("name")),
            )
        ]))("color")

    # Material colors: ambient/diffuse/specular RGBA channels.
    ambient_keywords = [
        cls._or(k) for k in ("ambient_red", "ambient_green", "ambient_blue",
                             "ambient_alpha")
    ]
    property_ambient_color = cls._aggregate_property(
        "ambient_color", psp, *ambient_keywords)
    diffuse_keywords = [
        cls._or(k) for k in ("diffuse_red", "diffuse_green", "diffuse_blue",
                             "diffuse_alpha")
    ]
    property_diffuse_color = cls._aggregate_property(
        "diffuse_color", psp, *diffuse_keywords)
    specular_keywords = [
        cls._or(k) for k in ("specular_red", "specular_green",
                             "specular_blue", "specular_alpha")
    ]
    property_specular_color = cls._aggregate_property(
        "specular_color", psp, *specular_keywords)

    # Texture coordinates: s/u/tx and t/v/ty are synonymous keyword sets.
    texture_keywords = [
        cls._or(*k) for k in (("s", "u", "tx"), ("t", "v", "ty"))
    ]
    property_texture = cls._aggregate_property("texture", psp,
                                               *texture_keywords)

    # Vertex normals and scalar material properties.
    normal_keywords = [cls._or(k) for k in ("nx", "ny", "nz")]
    property_normal = cls._aggregate_property("normal", psp,
                                              *normal_keywords)
    power_keywords = [CaselessKeyword("specular_power")]
    property_specular_power = cls._aggregate_property(
        "specular_power", psp, *power_keywords)
    opacity_keywords = [CaselessKeyword("opacity")]
    property_opacity = cls._aggregate_property("opacity", psp,
                                               *opacity_keywords)

    # plp = "property list prefix": "property list <index_type> <data_type>"
    plp = property_keyword + list_keyword + property_type(
        "index_type") + property_type("data_type")
    vertex_index_keywords = [cls._or("vertex_index", "vertex_indices")]
    property_vertex_index = cls._aggregate_property(
        "vertex_index", plp, *vertex_index_keywords)
    material_index_keywords = [
        cls._or("material_index", "material_indices")
    ]
    property_material_index = cls._aggregate_property(
        "material_index", plp, *material_index_keywords)

    # Define the grammar of elements
    element_keyword = cls._or(cls.element_keyword, suppress=True)
    element_vertex = Group(
        element_keyword + CaselessKeyword("vertex")("name") +
        integer("count") + Group(
            OneOrMore(property_position | property_color |
                      property_ambient_color | property_diffuse_color |
                      property_specular_color | property_texture |
                      property_normal | property_specular_power |
                      property_opacity))("properties"))
    element_face = Group(element_keyword + CaselessKeyword("face")("name") +
                         integer("count") +
                         Group(property_vertex_index |
                               property_material_index)("properties"))
    element_group = element_vertex | element_face

    # Full header: format line, any number of comments, then the elements.
    declarations = format_expr + \
        Group(ZeroOrMore(vertex_shader_comment | fragment_shader_comment |
                         texture_comment | other_comment))("comments") + \
        Group(OneOrMore(element_group))("elements")
    header_grammar = start_keyword + declarations + stop_keyword

    return cls(header_grammar, base_shader_path, base_texture_path)
# Reserved words of the language (case-insensitive).
PROGRAMA, CAR, INT, RETORNE = map(CaselessKeyword,
                                  "programa car int retorne".split())
ESCREVA, NOVALINHA, SE, ENTAO = map(CaselessKeyword,
                                    "escreva novalinha se entao".split())
# FIX: the original also mapped ";" through CaselessKeyword into TERMINATOR
# on this line; that binding was dead code — immediately shadowed by the
# Word(";") definition below — and CaselessKeyword is the wrong element for
# punctuation anyway.  The dead binding is removed.
SENAO, ENQUANTO, EXECUTE, LEIA = map(CaselessKeyword,
                                     "senao enquanto execute leia".split())
keywords = MatchFirst(
    (PROGRAMA, CAR, INT, RETORNE, ESCREVA, NOVALINHA, SE, ENTAO, SENAO,
     ENQUANTO, EXECUTE, LEIA)).setName("Reserved Words")

# Define the Terminator character
TERMINATOR = Word(";").setName("Terminator")

# Define the numbers
realNum = ppc.real().setName("Real Number")
intNum = ppc.signed_integer().setName("Integer Number")

# Define the identificator
identifier = Word(alphas, alphanums + "_$").setName("Identifier")

# Types Definition
Type = (INT | CAR).setName("Type")
# <<<<<<<<<<<<<<< BASICS DEFINITIONS<<<<<<<<<<<<<<<<<<<<<<<<

# >>>>>>>>>>>>>>> EXPRESSIONS DECLARATIONS>>>>>>>>>>>>>>>>>>
# Forward declarations for the recursive Command / Expression rules.
Command = Forward()
Expr = Forward()
# Literal keywords mapped to Python values.  make_keyword is defined
# elsewhere in this file; presumably it returns a pyparsing element whose
# parse result is the second argument — TODO confirm against its definition.
FALSE = make_keyword("false", False)
NULL = make_keyword("null", None)

# Edge/relation symbols mapped to relation names.
ed5 = make_keyword("|>|", 'increased_in_magnitude_relative_to')
ed6 = make_keyword("|<|", 'decreased_in_magnitude_relative_to')
ed7 = make_keyword("|=|", 'has_count')
ed1 = make_keyword(">", 'has_part')
ed3 = make_keyword("<", 'part_of')
ed2 = make_keyword('>>', 'bearer_of')
ed4 = make_keyword('<<', 'inheres_in')

#graph_word = Word(alphanums +"_", alphanums+"_")
#jsonNumber = ppc.number()
# Numeric tokens carry results names so callers can tell int from real.
phsInt = ppc.integer()('num_int')
phsReal = ppc.real()('num_real')
graph_alphanum = Word(alphanums + "_" + "-")
#graph_alphanum = Word(alphanums +"_", alphanums+"_")
# A graph word is a real, an integer, or a bare alphanumeric token
# (real is tried before integer so "1.5" is not split).
graph_word = (phsReal | phsInt | graph_alphanum)

#----------- Node
# Node Properties: N[]
propertyWord = Word(alphanums + "_" + ":")  # in []
#jsonString = quotedString().setParseAction(removeQuotes)
# Quoted strings have their quotes stripped; bare property words also accepted.
jsonStr = quotedString().setParseAction(removeQuotes)
jsonString = (jsonStr | propertyWord)
jsonNumber = ppc.number()
#jsonElements = delimitedList(jsonValue)
def __mk_grammar(self):
    """Build the language grammar; called from the Parser class constructor.

    Returns the top-level ``block`` parser (with C-style and ``//`` comments
    ignored) followed by ``stringEnd``.  Local variable names matter here:
    the ``locals()`` loop at the bottom attaches a parse action to every
    pyparsing element by its variable name.
    """
    # Literal and identifier nodes (LiteralNode / IdentNode).
    # FIX: the original alternation was ``ppc.integer() | ppc.real()``; with
    # integer first, an input such as "1.5" matched only the leading "1" and
    # the parse then failed on ".5".  Trying real() first fixes this while
    # still matching plain integers (real requires a decimal point).
    num = ppc.real() | ppc.integer()
    str_ = pp.QuotedString('"', escChar='\\', unquoteResults=True,
                           convertWhitespaceEscapes=False)
    literal = (num | str_).setName('Literal')
    ident = ppc.identifier.setName('Ident')

    # Keywords.
    VAR_KW, FUNC_KW, RETURN_KW = pp.Keyword('var'), pp.Keyword('function'), pp.Keyword('return')
    IF_KW, ELSE_KW = pp.Keyword('if'), pp.Keyword('else')
    FOR_KW, DO_KW, WHILE_KW = pp.Keyword('for'), pp.Keyword('do'), pp.Keyword('while')

    # Brackets, comma and semicolon (all suppressed from the parse tree).
    L_PAR, R_PAR = pp.Literal('(').suppress(), pp.Literal(')').suppress()
    L_BRACKET, R_BRACKET = pp.Literal('{').suppress(), pp.Literal('}').suppress()
    SEMICOLON, COMMA = pp.Literal(';').suppress(), pp.Literal(',').suppress()

    # Operators.
    ASSIGN = pp.Literal('=')
    ADD, SUB, MUL, DIV, MOD, EXP = pp.Literal('+'), pp.Literal('-'), pp.Literal('*'), pp.Literal('/'), \
        pp.Literal('%'), pp.Literal('**')
    LOG_AND, LOG_OR, LOG_NOT = pp.Literal('&&'), pp.Literal('||'), pp.Literal('!')
    GT, LT, GE, LE = pp.Literal('>'), pp.Literal('<'), pp.Literal('>='), pp.Literal('<=')
    NEQ, EQ = pp.Literal('!='), pp.Literal('==')
    INCR, DECR = pp.Literal('++'), pp.Literal('--')

    # Forward declarations for the multiplication/addition chains and for
    # Expression; they are defined further down.
    mul_op = pp.Forward()
    add_op = pp.Forward()
    expr = pp.Forward()

    # Function call.
    call = (ident + L_PAR + pp.Optional(expr + pp.ZeroOrMore(COMMA + expr)) +
            R_PAR).setName('Call')

    # Unary operations: increment, decrement.
    incr_op = (ident + INCR).setName('UnaryExpr')
    decr_op = (ident + DECR).setName('UnaryExpr')

    group = (literal | call | ident | L_PAR + expr + R_PAR)

    # Binary expressions, from highest to lowest precedence.
    mul_op << pp.Group(group + pp.ZeroOrMore((EXP | MUL | DIV | MOD) + group)).setName('BinExpr')
    add_op << pp.Group(mul_op + pp.ZeroOrMore((ADD | SUB) + mul_op)).setName('BinExpr')
    compare = pp.Group(add_op + pp.ZeroOrMore((GE | LE | GT | LT) + add_op)).setName('BinExpr')
    compare_eq = pp.Group(compare + pp.ZeroOrMore((EQ | NEQ) + compare)).setName('BinExpr')
    log_and_op = pp.Group(compare_eq + pp.ZeroOrMore(LOG_AND + compare_eq)).setName('BinExpr')
    log_or_op = pp.Group(log_and_op + pp.ZeroOrMore(LOG_OR + log_and_op)).setName('BinExpr')
    expr << log_or_op

    # Assignment and variable declarations.
    assign = (ident + ASSIGN + expr).setName('BinExpr')
    simple_assign = (ident + ASSIGN.suppress() + expr)
    var_item = simple_assign | ident
    simple_var = (VAR_KW.suppress() + var_item).setName('Declarator')
    mult_var_item = (COMMA + var_item).setName('Declarator')
    mult_var = (simple_var + pp.ZeroOrMore(mult_var_item)).setName('VarDeclaration')

    stmt = pp.Forward()
    simple_stmt = assign | call | incr_op | decr_op

    # The for loop pieces (init, test, step, body).
    for_statement_list = pp.Optional(simple_stmt + pp.ZeroOrMore(COMMA + simple_stmt)).setName('BlockStatement')
    for_statement = mult_var | for_statement_list
    for_test = expr | pp.Group(pp.empty)
    for_block = stmt | pp.Group(SEMICOLON).setName('BlockStatement')

    # for, while, do-while loops and the if conditional.
    if_ = (IF_KW.suppress() + L_PAR + expr + R_PAR + stmt +
           pp.Optional(ELSE_KW.suppress() + stmt)).setName('If')
    for_ = (FOR_KW.suppress() + L_PAR + for_statement + SEMICOLON + for_test +
            SEMICOLON + for_statement + R_PAR + for_block).setName('For')
    while_ = (WHILE_KW.suppress() + L_PAR + expr + R_PAR + stmt).setName('While')
    do_while = (DO_KW.suppress() + stmt + WHILE_KW.suppress() + L_PAR + expr +
                R_PAR).setName('DoWhile')

    # Code blocks (with and without { }), function arguments, function
    # declarations and the return statement.
    block = pp.ZeroOrMore(stmt + pp.ZeroOrMore(SEMICOLON)).setName('BlockStatement')
    br_block = L_BRACKET + block + R_BRACKET
    args = ((expr + pp.ZeroOrMore(COMMA + expr)) | pp.Group(pp.empty)).setName("Args")
    func_decl = (FUNC_KW.suppress() + ident + L_PAR + args + R_PAR + br_block)\
        .setName('FuncDeclaration')
    return_ = (RETURN_KW.suppress() + expr).setName('Return')

    stmt << (
        if_ |
        for_ |
        while_ |
        do_while |
        br_block |
        mult_var + SEMICOLON |
        simple_stmt + SEMICOLON |
        func_decl |
        return_
    )

    # locals().copy().items() returns a dict of every variable in the current
    # scope; all entries are iterated in the for loop below.
    for var_name, value in locals().copy().items():
        # check that the current item is an instance of ParserElement
        if isinstance(value, pp.ParserElement):
            # attach the parse action registered under this variable's name
            self.__set_parse_action(var_name, value)

    return block.ignore(pp.cStyleComment).ignore(pp.dblSlashComment) + pp.stringEnd
def __init__(self):
    """
    A program is a list of statements.
    Statements can be 'set' or 'select' statements.

    Builds ``self.program``: one or more statements, with ``--`` rest-of-line
    comments ignored.
    """
    statement = Forward()  # forward-declared so WHERE ... IN (<statement>) can recurse
    SELECT, FROM, WHERE, SET, AS = map(CaselessKeyword,
                                       "select from where set as".split())

    # Identifiers may start with "$" in addition to letters.
    ident = Word("$" + alphas, alphanums + "_$").setName("identifier")
    columnName = delimitedList(ident, ".", combine=True).setName("column name")
    columnNameList = Group(delimitedList(columnName))
    # NOTE(review): setName("column name") below looks copied from the
    # columnName rule; presumably "table name" was intended — confirm.
    tableName = delimitedList(ident, ".", combine=True).setName("column name")
    tableNameList = Group(delimitedList(tableName))

    SEMI, COLON, LPAR, RPAR, LBRACE, RBRACE, LBRACK, RBRACK, DOT, COMMA, EQ = map(
        Literal, ";:(){}[].,=")
    arrow = Literal("->")

    # A t_expr is either ident(<$arg>) or a bare word, each optionally
    # followed by line ends; chains are joined with "->".
    t_expr = Group(ident + LPAR + Word("$" + alphas, alphanums + "_$") + RPAR +
                   ZeroOrMore(LineEnd())).setName("t_expr") | \
        Word(alphas, alphanums + "_$") + ZeroOrMore(LineEnd())
    t_expr_chain = t_expr + ZeroOrMore(arrow + t_expr)

    whereExpression = Forward()
    and_, or_, in_ = map(CaselessKeyword, "and or in".split())

    binop = oneOf("= != < > >= <= eq ne lt le gt ge", caseless=True)
    realNum = ppc.real()
    intNum = ppc.signed_integer()
    columnRval = realNum | intNum | quotedString | columnName  # need to add support for alg expressions
    # Conditions: comparison (falling back to any printable token on the
    # right-hand side), IN (<values>), IN (<statement>), or parentheses.
    whereCondition = Group(
        (columnName + binop + (columnRval | Word(printables)))
        | (columnName + in_ + "(" + delimitedList(columnRval) + ")")
        | (columnName + in_ + "(" + statement + ")")
        | ("(" + whereExpression + ")"))
    # Right-recursive chain: <condition> [(and|or) <expression>] ...
    whereExpression << whereCondition + ZeroOrMore((and_ | or_) + whereExpression)

    ''' Assignment for handoff. '''
    setExpression = Forward()
    setStatement = Group(
        (ident)
        | (quotedString("json_path") + AS + ident("name"))
        | ("(" + setExpression + ")"))
    setExpression << setStatement + ZeroOrMore((and_ | or_) + setExpression)

    optWhite = ZeroOrMore(LineEnd() | White())

    """ Define the statement grammar. """
    statement <<= (
        Group(
            Group(SELECT + t_expr_chain)("concepts") + optWhite +
            Group(FROM + tableNameList) + optWhite +
            Group(Optional(WHERE + whereExpression("where"), "")) + optWhite +
            Group(Optional(SET + setExpression("set"), ""))("select")
        )
        |
        Group(
            SET + (columnName + EQ + (quotedString | intNum | realNum))
        )("set")
    )("statement")

    """ Make a program a series of statements. """
    self.program = statement + ZeroOrMore(statement)

    """ Make rest-of-line comments. """
    comment = "--" + restOfLine
    self.program.ignore(comment)
alphanums, Combine, oneOf, Optional, QuotedString, Forward, Group,
ZeroOrMore, srange, pyparsing_common as ppc,
)  # NOTE(review): this closes an import that begins above this chunk

# Single-character punctuation, each suppressed from the parse results.
MARK, UNMARK, AT, COLON, QUOTE = map(Suppress, "[]@:'")

NUMBER = ppc.integer()
FLOAT = ppc.real()
STRING = QuotedString('"', multiline=True) | QuotedString("'", multiline=True)
WORD = Word(alphas, alphanums + "_:")
ATTRIBUTE = Combine(AT + WORD)

# Length-prefixed blob body: redefined on the fly by setBodyLength.
strBody = Forward()


def setBodyLength(tokens):
    # Parse action: the integer just matched gives the exact character count
    # of the blob body that follows the colon.
    strBody << Word(srange(r"[\0x00-\0xffff]"), exact=int(tokens[0]))
    return ""


# BLOB syntax: '<length>:<body>' — the length token drives strBody's width.
BLOB = Combine(
    QUOTE + Word(nums).setParseAction(setBodyLength) + COLON + strBody + QUOTE
)