def convert(inputs: List[Input]) -> str:
    # With several inputs, emit the bootstrap code once up front under a
    # shared namespace; with a single input, leave result as None so the
    # input's own generator emits the bootstrap in the loop below
    # (otherwise the `result is None` check would be dead code).
    result = None if len(inputs) <= 1 else Generator("bootstrap").bootstrap()

    for source in inputs:
        generator = Generator(source.namespace)
        visitor = Visitor(generator)

        if result is None:
            result = generator.bootstrap()

        lexer = VMLexer(source.stream)
        tokens = antlr4.CommonTokenStream(lexer)
        parser = VMParser(tokens)
        tree = parser.program()

        result = result + visitor.visit(tree)

    return result
Example #2
def from_sstream(sstream: str, translator: nars2networkx) -> None:
    ## Create lexer and tokenize input
    lexer = NarseseLexer(antlr.InputStream(sstream))
    token_stream = antlr.CommonTokenStream(lexer)
    ## Create parser and build AST
    parser = NarseseParser(token_stream)
    parser._interp.predictionMode = antlr.PredictionMode.SLL
    tree = parser.narsese()
    ## Create tree walker and inject the translator over a networkx graph
    walker = antlr.ParseTreeWalker()
    walker.walk(translator, tree)
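
Note on the SLL line above: forcing `PredictionMode.SLL` speeds parsing up but can reject inputs that need full LL prediction. A common companion pattern is two-stage parsing: bail out of the SLL pass on the first error and retry with LL. A minimal sketch, assuming hypothetical generated classes `MyLexer`/`MyParser` with a `program` start rule:

import antlr4
from antlr4 import PredictionMode
from antlr4.error.ErrorStrategy import BailErrorStrategy, DefaultErrorStrategy
from antlr4.error.Errors import ParseCancellationException

def parse_two_stage(text):
    lexer = MyLexer(antlr4.InputStream(text))    # hypothetical generated lexer
    tokens = antlr4.CommonTokenStream(lexer)
    parser = MyParser(tokens)                    # hypothetical generated parser
    # First pass: fast SLL prediction, abort on the first syntax error.
    parser._interp.predictionMode = PredictionMode.SLL
    parser._errHandler = BailErrorStrategy()
    try:
        return parser.program()
    except ParseCancellationException:
        # Second pass: rewind and re-parse with full LL prediction and
        # the default (recovering) error strategy.
        tokens.seek(0)
        parser.reset()
        parser._interp.predictionMode = PredictionMode.LL
        parser._errHandler = DefaultErrorStrategy()
        return parser.program()
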
Example #3
    def process(self, infile, outfile):
        """
        Processes an SDoc2 document.

        :param str infile: The input filename with the SDoc2 document.
        :param str outfile: The output filename with the target document.
        """
        in_stream = antlr4.FileStream(infile, 'utf-8')

        lexer = sdoc2Lexer(in_stream)
        tokens = antlr4.CommonTokenStream(lexer)
        parser = sdoc2Parser(tokens)
        tree = parser.sdoc()
        visitor = SDoc2Visitor()

        visitor.visit(tree)

        sdoc.sdoc2.node_store.prepare_content_tree()
Example #4
    def ParseFile(self):
        self.PreprocessFile()
        # restore from list-of-lists to list of strings
        self.Profile.FileLinesList = [
            "".join(lines) for lines in self.Profile.FileLinesList
        ]
        FileStringContents = ''.join(self.Profile.FileLinesList)
        for Token in self.TokenReleaceList:
            if Token in FileStringContents:
                FileStringContents = FileStringContents.replace(
                    Token, 'TOKENSTRING')
        cStream = antlr.InputStream(FileStringContents)
        lexer = CLexer(cStream)
        tStream = antlr.CommonTokenStream(lexer)
        parser = CParser(tStream)
        parser.translation_unit()
Example #5
def parse(input_stream):
    lexer = AgentScriptCCLexer(input_stream)
    stream = antlr4.CommonTokenStream(lexer)
    parser = AgentScriptCCParser(stream)

    parser.removeErrorListeners()
    errorListener = AgentScriptCCErrorListener()
    parser.addErrorListener(errorListener)

    tree = parser.program()
    loader = AgentScriptCCLoader()
    walker = antlr4.ParseTreeWalker()
    walker.walk(loader, tree)

    program = loader.program
    program.parsing_errors = errorListener.errors

    return program
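
`AgentScriptCCErrorListener` is project code, but collecting listeners like it generally follow one recipe: subclass the runtime's `ErrorListener` and record each `syntaxError` callback. A minimal sketch (the `errors` attribute name is an assumption, chosen to mirror its use in `parse` above):

from antlr4.error.ErrorListener import ErrorListener

class CollectingErrorListener(ErrorListener):
    """Accumulates syntax errors instead of printing them to stderr."""

    def __init__(self):
        super().__init__()
        self.errors = []

    def syntaxError(self, recognizer, offendingSymbol, line, column, msg, e):
        # Called by the lexer/parser for every syntax error encountered.
        self.errors.append('line {}:{} {}'.format(line, column, msg))
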
Example #6
    def parse(self):
        """
        Parse the input query and store the output in self.tree.

        """
        inpt = antlr4.InputStream(self.query)
        lexer = ADQLLexer(inpt)
        self.stream = antlr4.CommonTokenStream(lexer)
        self.parser = ADQLParser(self.stream)
        self.syntax_error_listener = SyntaxErrorListener()
        self.parser._listeners = [self.syntax_error_listener]

        self.tree = self.parser.query()

        if len(self.syntax_error_listener.syntax_errors):
            raise QuerySyntaxError(self.syntax_error_listener.syntax_errors)

        self.walker = antlr4.ParseTreeWalker()
Example #7
def compile(filename):

    # prepare file to be parsed
    inputfile = a4.FileStream(filename)
    lexer = ClassLayoutLexer(inputfile)
    stream = a4.CommonTokenStream(lexer)
    # parse to a tree
    parser = ClassLayoutParser(stream)
    tree = parser.u2cFile()

    # open the listener
    fl = FileListener()

    # walk the tree
    walker = a4.ParseTreeWalker()
    walker.walk(fl, tree)

    print(fl.c.write('java'))
Example #8
def main():

    # Setup lexers and parsers
    lexer = FrackLexer(antlr4.FileStream(TEST_FILE))
    stream = antlr4.CommonTokenStream(lexer)
    parser = FrackParser(stream)
    tree = parser.program()

    # Actually walk the parse tree so the listener collects the functions
    listener = Listener()
    walker = antlr4.ParseTreeWalker()
    walker.walk(listener, tree)
    print(tree.toStringTree())

    for f in listener.functions:
        print(f)

    print('done!')
Example #9
def compile(nndl_content):
    """
    """
    input_stream = antlr4.InputStream(nndl_content)
    lexer = NNDLLexer.NNDLLexer(input_stream)
    stream = antlr4.CommonTokenStream(lexer)
    parser = NNDLParser.NNDLParser(stream)
    tree = parser.prog()

    # Walk the tree and generate the dot file
    dg = DotGenerator()
    walker = antlr4.ParseTreeWalker()
    walker.walk(dg, tree)

    # Return the network that the DotGenerator inferred from the nndl
    # file; the caller uses it to generate the cpp file
    nw = dg._network
    return nw
Example #10
    def process(self, infile: str) -> int:
        """
        Processes an SDoc2 document and returns the error count.

        :param str infile: The input filename with the SDoc2 document.
        """
        in_stream = antlr4.FileStream(infile, 'utf-8')

        lexer = sdoc2Lexer(in_stream)
        tokens = antlr4.CommonTokenStream(lexer)
        parser = sdoc2Parser(tokens)
        tree = parser.sdoc()
        visitor = SDoc2Visitor(infile, self._io)

        visitor.visit(tree)

        sdoc2.node_store.prepare_content_tree()

        return visitor.errors
Example #11
def main(argv):
    if len(argv) != 3:
        raise AttributeError('invalid number of arguments to compiler')
    input_file, project_dir = argv[1:]
    if not input_file.endswith('.lat'):
        raise AttributeError('input_file must have `.lat` extension')

    out_path = os.path.dirname(input_file)
    base_name = os.path.split(input_file)[1][:-4]
    out_base_name = os.path.join(out_path, base_name)

    input_file_stream = antlr4.FileStream(input_file)
    syntax_error_listener = LatteParserErrorListener()

    lexer = LatteLexer(input_file_stream)
    lexer.removeErrorListeners()
    lexer.addErrorListener(syntax_error_listener)
    token_stream = antlr4.CommonTokenStream(lexer)

    parser = LatteParser(token_stream)
    parser.removeErrorListeners()
    parser.addErrorListener(syntax_error_listener)
    prog_tree = parser.program()

    compiler = LLVMCompiler()
    code = compiler.visit_prog(prog_tree)
    print('OK', file=sys.stderr)

    ll_file_path = out_base_name + '.ll'
    runtime_path = os.path.join(project_dir, 'lib', 'runtime.bc')
    bc_no_runtime_path = out_base_name + '_no_runtime.bc'
    bc_final_path = out_base_name + '.bc'
    with open(ll_file_path, 'w') as f:
        f.write(code)
        print(f'Saved {ll_file_path}')
    if os.system(f'llvm-as -o {bc_no_runtime_path} {ll_file_path}') != 0:
        sys.exit(3)
    print(f'Compiled to {bc_no_runtime_path}')
    if os.system(f'llvm-link -o {bc_final_path} '
                 f'{bc_no_runtime_path} {runtime_path}') != 0:
        sys.exit(4)
    os.remove(bc_no_runtime_path)
    print(f'Linked to runtime: {bc_final_path}')
Example #12
    def __call__(self, parser, namespace, values, option_string=None):
        from azure.cli.command_modules.monitor.grammar import (
            MetricAlertConditionLexer, MetricAlertConditionParser,
            MetricAlertConditionValidator)

        string_val = ' '.join(values)

        lexer = MetricAlertConditionLexer(antlr4.InputStream(string_val))
        stream = antlr4.CommonTokenStream(lexer)
        parser = MetricAlertConditionParser(stream)
        tree = parser.expression()

        validator = MetricAlertConditionValidator()
        walker = antlr4.ParseTreeWalker()
        walker.walk(validator, tree)
        metric_condition = validator.result()
        super(MetricAlertConditionAction,
              self).__call__(parser, namespace, metric_condition,
                             option_string)
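
For context, an action like this is wired into argparse roughly as below. A hedged sketch: the option name is made up, and `nargs='+'` is chosen to match the `' '.join(values)` above.

import argparse

cli_parser = argparse.ArgumentParser()
# nargs='+' means argparse hands the action the space-separated words of
# the condition expression, which __call__ above re-joins and parses.
cli_parser.add_argument('--condition', nargs='+',
                        action=MetricAlertConditionAction)
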
Example #13
    def create_program(self, templatefilepath, directory, filename):
        tmpfilepath = '/tmp/' + templatefilepath.split('/')[-1]
        with open(tmpfilepath, 'w') as tmpfile:
            tmpfile.write(ReOrder(templatefilepath).reordered_code)

        template = antlr4.FileStream(tmpfilepath)
        lexer = Template2Lexer(template)
        stream = antlr4.CommonTokenStream(lexer)
        parser = Template2Parser(stream)
        template = parser.template()

        walker = ParseTreeWalker()
        walker.walk(self, template)

        out_name = filename.split('/')[-1].replace('.template', '.py')
        with open(directory + '/' + out_name, 'w') as modelfile:
            modelfile.write(self.output_program)
Example #14
    def process(self, infile, outfile):
        """
        Processes an SDoc1 document.

        :param str infile: The input filename with the SDoc1 document.
        :param str outfile: The output filename with the SDoc2 document.
        """
        in_stream = antlr4.FileStream(infile)
        out_stream = open(outfile, 'wt')

        lexer = sdoc1Lexer(in_stream)
        tokens = antlr4.CommonTokenStream(lexer)
        parser = sdoc1Parser(tokens)
        tree = parser.sdoc()
        visitor = SDoc1Visitor(
            root_dir=os.path.dirname(os.path.realpath(infile)))

        visitor.set_output(out_stream)
        visitor.visit(tree)
Example #15
def process_sympy(sympy):
    matherror = MathErrorListener(sympy)

    stream = antlr4.InputStream(sympy)
    lex = PSLexer(stream)
    lex.removeErrorListeners()
    lex.addErrorListener(matherror)

    tokens = antlr4.CommonTokenStream(lex)
    parser = PSParser(tokens)

    # remove default console error listener
    parser.removeErrorListeners()
    parser.addErrorListener(matherror)

    relation = parser.math().relation()
    expr = convert_relation(relation)

    return expr
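
Typical usage of the entry point above (a hedged sketch; the LaTeX string is an arbitrary example):

# Turn a LaTeX relation into a SymPy expression:
expr = process_sympy(r'x^{2} + \frac{1}{2}')
print(expr)  # the SymPy object built by convert_relation
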
Example #16
def main():
    test1 = '''        &foo; ; &  fooo;
    5 bytes; 10 bytes be;
    &foo bits;
    10;'''

    test2 = '!(5 < 6) || 4'
    test3 = '[ sint : 5 bytes be ]; [uint : 5 bytes: &foo];'
    test4 = '[sint::5];'

    test5 = '''
    packet P2_generic : P2(&subtype == 3) {
        always timestamp    [ uint: 4 bytes be ];
        always version      [ uint: 2 bits ];
        always _padding     [ _ : 6 bits]; // pad to align to next byte

        variable speed {
            case(&version < 1) | v1: 0 | {
                always speed    [ uint : 2 bytes be ];
            }
            case(&version < 2) | v2: 1 | {
                always speed    [ uint : 3 bytes be ];
            }
            otherwise | cur: 2 | {
                always speed    [ uint : 4 bytes be ];
            }
        }

        optional metadata when(&version > 3) {
            always distance [ uint : 4 bytes be ];
            always end_time [ uint : 4 bytes be ];
        }
    }'''

    inp = antlr4.InputStream(test5)
    lexer = TightLexer(inp)

    stream = antlr4.CommonTokenStream(lexer)
    parser = TightParser(stream)
    tree = parser.module()

    print(tree.toStringTree(TightParser.ruleNames))
Example #17
    def get_pyro_features(self, pyrofile_path):
        import antlr4
        from antlr4 import ParseTreeWalker
        from parser.Python3Parser import Python3Parser
        from parser.Python3Lexer import Python3Lexer

        pyrofile = antlr4.FileStream(pyrofile_path)
        lexer = Python3Lexer(pyrofile)
        stream = antlr4.CommonTokenStream(lexer)
        parser = Python3Parser(stream)

        code = parser.file_input()
        walker = ParseTreeWalker()
        walker.walk(self, code)
        feature_vector = {}
        for k in self.post_map:
            if 'post' in self.post_map[k]:
                feature_vector['vi_' + self.post_map[k]['prior'] + '_' +
                               self.post_map[k]['post']] = 1
        return feature_vector
Example #18
def main():
    # command line
    parser = argparse.ArgumentParser(description='Exec/Type mu files.')
    parser.add_argument('path', type=str, help='file to exec and type')
    args = parser.parse_args()

    # lex and parse
    input_s = antlr4.FileStream(args.path, encoding='utf8')
    lexer = MuLexer(input_s)
    stream = antlr4.CommonTokenStream(lexer)
    parser = MuParser(stream)
    tree = parser.prog()

    # eval visitor - you have some TODOs in this file!
    visitor2 = MuEvalVisitor()
    try:
        visitor2.visit(tree)
    except (MuRuntimeError, MuSyntaxError) as e:
        print(e.args[0])
        exit(1)
Example #19
def prg2py_after_preproc(data, parser_start, input_filename):
    input_stream = antlr4.InputStream(data)
    lexer = VisualFoxpro9Lexer(input_stream)
    stream = antlr4.CommonTokenStream(lexer)
    parser = VisualFoxpro9Parser(stream)
    tree = run_parser(stream, parser, parser_start)
    TreeCleanVisitor().visit(tree)
    output_tree = PythonConvertVisitor(input_filename).visit(tree)
    if not isinstance(output_tree, list):
        return output_tree
    output = add_indents(output_tree, 0)
    options = autopep8.parse_args(['--max-line-length', '100000', '-'])
    output = autopep8.fix_code(output, options)
    tokens = list(tokenize.generate_tokens(io.StringIO(output).readline))
    for i, token in enumerate(tokens):
        token = list(token)
        if token[0] == tokenize.STRING and token[1].startswith('u'):
            token[1] = token[1][1:]
        tokens[i] = tuple(token)
    return tokenize.untokenize(tokens)
Example #20
def initialize_transform_rules():

    # Custom transform rules defined in a file
    rule_file = './rule.txt'

    input_stream = antlr4.FileStream(rule_file)
    lexer = rulesLexer(input_stream)
    stream = antlr4.CommonTokenStream(lexer)
    parser = rulesParser(stream)
    tree = parser.rules()

    rules = set()
    for single_rule in tree.getChildren():  # child indices follow the rules defined in the g4 file
        rule_name = single_rule.getChild(0).getText()  # the leading part is the rule name
        origin_parse_tree = expr.ParseTree(
            single_rule.getChild(2))  # input pattern
        transformed_parse_tree = expr.ParseTree(
            single_rule.getChild(4))  # output pattern
        if single_rule.getChildCount() == 10:
            cp_parse_tree = None
            pp_parse_tree = None
            weight = int(single_rule.getChild(8).getText()[1:])

        elif single_rule.getChildCount() == 11:
            if single_rule.getChild(6).getText() != '@':
                cp_parse_tree = expr.ParseTree(single_rule.getChild(6))
                pp_parse_tree = None
            else:
                cp_parse_tree = None
                pp_parse_tree = expr.ParseTree(single_rule.getChild(7))
            weight = int(single_rule.getChild(9).getText()[1:])

        else:
            cp_parse_tree = expr.ParseTree(single_rule.getChild(6))
            pp_parse_tree = expr.ParseTree(single_rule.getChild(8))
            weight = int(single_rule.getChild(10).getText()[1:])

        rules.add(
            TransformRule(rule_name, origin_parse_tree, transformed_parse_tree,
                          cp_parse_tree, pp_parse_tree, weight))
    return rules
Example #21
def _parse_input_stream(input_stream: antlr4.InputStream) -> RootNode:
    error_listener = _ConsoleErrorListener()

    lexer = JSONPathLexer(input_stream)
    lexer.addErrorListener(error_listener)

    token_stream = antlr4.CommonTokenStream(lexer)
    parser = _JSONPathParser(token_stream)
    parser.addErrorListener(error_listener)

    tree = parser.jsonpath()

    listener = _JSONPathListener(_stack=[])
    walker = antlr4.ParseTreeWalker()
    walker.walk(listener, tree)

    return listener._stack.pop()
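
The `_stack=[]` pattern above is the usual way to build a result bottom-up with a listener: each exit hook pops the values its children pushed and pushes the combined node. A minimal sketch with hypothetical rule hooks (`exitNumber`, `exitAdd`) for a toy expression grammar:

from antlr4 import ParseTreeListener

class StackBuildingListener(ParseTreeListener):
    def __init__(self):
        self._stack = []

    def exitNumber(self, ctx):   # hypothetical `number` rule
        self._stack.append(int(ctx.getText()))

    def exitAdd(self, ctx):      # hypothetical `add` rule
        right = self._stack.pop()
        left = self._stack.pop()
        self._stack.append(left + right)

# After walker.walk(listener, tree), listener._stack.pop() is the result.
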
Example #22
def main(argv):
    if len(argv) != 4:
        raise AttributeError('invalid number of arguments to compiler')
    input_file, target_vm, project_dir = argv[1:]
    if not input_file.endswith('.ins'):
        raise AttributeError('input_file must have `.ins` extension')

    out_path = os.path.dirname(input_file)
    base_name = os.path.split(input_file)[1][:-4]
    out_base_name = os.path.join(out_path, base_name)

    input_file_stream = antlr4.FileStream(input_file)
    lexer = InstantLexer(input_file_stream)
    token_stream = antlr4.CommonTokenStream(lexer)
    parser = InstantParser(token_stream)
    prog_tree = parser.prog()
    if target_vm == 'jvm':
        compiler = JVMCompiler(base_name)
    elif target_vm == 'llvm':
        compiler = LLVMCompiler()
    else:
        raise AttributeError(f'unknown target VM: `{target_vm}`')

    code = compiler.visit_prog(prog_tree)

    if target_vm == 'llvm':
        ll_file_path = out_base_name + '.ll'
        bc_file_path = out_base_name + '.bc'
        with open(ll_file_path, 'w') as f:
            f.write(code)
            print(f'Saved {ll_file_path}')
        os.system(f'llvm-as {ll_file_path} -o {bc_file_path}')
        print(f'Compiled to {bc_file_path}')
    elif target_vm == 'jvm':
        j_file_path = out_base_name + '.j'
        with open(j_file_path, 'w') as f:
            f.write(code)
            print(f'Saved {j_file_path}')
        jasmin_path = os.path.join(project_dir, 'lib', 'jasmin.jar')
        os.system(f'java -jar {jasmin_path} -d {out_path} {j_file_path}')
Example #23
    def visitCmd_include(self, ctx):
        """
        Includes another SDoc into this SDoc.

        :param sdoc1Parser.Cmd_includeContext ctx: The parse tree.
        """
        # Test the maximum include level.
        if self._include_level >= self._options['max_include_level']:
            raise RuntimeError(
                "Maximum include level exceeded."
            )  # @todo More verbose logging, own exception class.

        # Open a stream for the sub-document.
        file_name = sdoc.unescape(ctx.SIMPLE_ARG().getText())
        if not os.path.isabs(file_name):
            file_name = os.path.join(self._root_dir, file_name + '.sdoc')
        print("Including %s" % os.path.relpath(file_name))
        stream = antlr4.FileStream(file_name, 'utf-8')

        # Create a new lexer and parser for the sub-document.
        lexer = sdoc1Lexer(stream)
        tokens = antlr4.CommonTokenStream(lexer)
        parser = sdoc1Parser(tokens)
        tree = parser.sdoc()

        # Create a visitor.
        visitor = SDoc1Visitor(
            root_dir=os.path.dirname(os.path.realpath(file_name)))

        # Set or inherit properties from the parser of the parent document.
        visitor._include_level = self._include_level + 1
        visitor.set_output(self._output)
        visitor._set_global_scope(self._global_scope)

        # Run the visitor on the parse tree.
        visitor.visit(tree)

        self.put_position(ctx, 'stop')
Example #24
def main(argv):

    input_filename = argv[1]
    output_filename = argv[2]
    input_file = AntlrCaseInsensitiveFileInputStream(input_filename)
    lexer = PlSqlLexer(input_file)
    stream = antlr4.CommonTokenStream(lexer)
    parser = PlSqlParser(stream)
    tree = parser.sql_script()
    visitor = ScriptVisitor()
    node = tree.accept(visitor)
    #print(ast.dump(node))
    #astpretty.pprint(node)  # this module is broken, do not use :(
    try:
        code = astor.to_source(node)
    except:
        print(ast.dump(node))
        raise

    with open(output_filename, "w") as output:
        output.write(code)
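
`AntlrCaseInsensitiveFileInputStream` is project-specific, but the usual trick behind such classes is to override `LA` so the lexer matches against a single-cased character stream while token text keeps its original casing. A minimal sketch of that idea, assuming the grammar's keywords are written in upper case:

import antlr4

class CaseInsensitiveFileStream(antlr4.FileStream):
    def LA(self, offset):
        c = super().LA(offset)
        if c <= 0:
            return c  # EOF / invalid offsets pass through unchanged
        # The lexer sees upper-case characters; the underlying text
        # (and therefore token text) stays untouched.
        return ord(chr(c).upper())
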
Example #25
def casos_de_teste_semantico():
    with open('oi.txt', 'r') as caso_de_teste:
        mapa = caso_de_teste.read()
        mapa_input = antlr4.InputStream(mapa)

        lexer = TileMapLexer(input=mapa_input)
        lexer.removeErrorListeners()
        tokens = antlr4.CommonTokenStream(lexer=lexer)

        parser = TileMapParser(tokens)

        parser.removeErrorListeners()
        erros_sintaticos = ErrosSintaticosErrorListener()
        parser.addErrorListener(erros_sintaticos)
        try:
            mapa = parser.mapa()
            analisador_semantico = Semantico()
            analisador_semantico.visitMap(mapa)
            print('Compilation finished')
        except Exception as e:
            print(str(e), file=sys.stderr)
Example #26
    def _parse_rule(self):
        stream = antlr4.InputStream(self.rule)
        fl = CFLexer(stream)
        fl.removeErrorListeners()
        fl.addErrorListener(FilterErrorListener.INSTANCE)
        token_stream = antlr4.CommonTokenStream(fl)
        parser = CFilter(token_stream)
        parser.removeErrorListeners()
        parser.addErrorListener(FilterErrorListener.INSTANCE)
        visitor = MyVisitor()
        self.identifiers, self.node = visitor.visit_res(parser.root())

        # If identifiers is None or empty, the expression has no variables,
        # so its value can be computed immediately; evaluate it once and
        # replace visit_res with a function that returns the constant node.

        if self.identifiers is None or len(self.identifiers) == 0:
            self.node = self.node.visit()
            self.__dict__["visit_res"] = lambda *args, **kwargs: self.node
            self.is_const = True
        else:
            self.is_const = False
Example #27
def casos_de_teste_gerador():
    print('-------------------------------------')
    print('  CODE GENERATOR TEST CASES')
    print('-------------------------------------')
    for i in range(1, 2):
        with open(DIRETORIO_PROJETO + CAMINHO_ARQUIVOS_ENTRADA +
                  GERACAO_DE_CODIGO + 'ct_gerador_' + str(i) + '.txt',
                  encoding='utf-8') as caso_de_teste:
            programa = caso_de_teste.read()
            programa_input = antlr4.InputStream(programa)

            lexer = tileLexer(input=programa_input)
            lexer.removeErrorListeners()
            tokens = antlr4.CommonTokenStream(lexer=lexer)

            parser = tileParser(tokens)

            parser.removeErrorListeners()
            erros_sintaticos = ErrosSintaticosErrorListener()
            parser.addErrorListener(erros_sintaticos)
            try:
                programa = parser.mapa()

                gerador_de_codigo = GeradorCodigo()
                gerador_de_codigo.visitMapa(programa)

                codigo_gerado = gerador_de_codigo.getCodigo()

                arquivo = DIRETORIO_PROJETO + CAMINHO_ARQUIVOS_SAIDA + GERACAO_DE_CODIGO + 'ct_gerador_' + \
                          str(i) + '.html'

                arquivo_saida = open(arquivo, 'w', encoding='utf-8')
                arquivo_saida.write(codigo_gerado)
                arquivo_saida.close()

                print('[CT' + str(i) + '_GERADOR] Code generated!')
            except Exception as e:
                print('[CT' + str(i) + '_GERADOR] ' + str(e), file=sys.stderr)
Example #28
    def parse_ctc(self, ctc: str):
        words = ctc.split(' ')
        final_ctc = ''
        aux = False
        for word in words:
            if word == 'NOT' or word == '(NOT':
                final_ctc = final_ctc + ' (' + word + ' ('
                aux = True
            elif aux:
                final_ctc = final_ctc + word + ')) '
                aux = False
            else:
                final_ctc = final_ctc + " " + word
        final_ctc = final_ctc[1:]

        lexer = propositionalLexer(antlr4.InputStream(final_ctc))
        stream = antlr4.CommonTokenStream(lexer)
        parser = propositionalParser(stream)
        tree = parser.formula()
        return tree
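
A worked trace of the `NOT`-wrapping pre-pass above, for the hypothetical input 'A AND NOT B':

# words         -> ['A', 'AND', 'NOT', 'B']
# after 'A'     -> ' A'
# after 'AND'   -> ' A AND'
# after 'NOT'   -> ' A AND (NOT ('    (aux becomes True)
# after 'B'     -> ' A AND (NOT (B)) '
# final_ctc[1:] == 'A AND (NOT (B)) ', which is what the grammar parses.
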
Example #29
    def create_program(self):
        template = antlr4.FileStream(self.templatefile)
        lexer = TemplateLexer(template)
        stream = antlr4.CommonTokenStream(lexer)
        parser = TemplateParser(stream)
        self.visit(parser.template())

        # write data
        with open(self._directory + '/data.json', 'w') as datafile:
            for k in six.iterkeys(self.data_json):
                if isinstance(self.data_json[k], np.ndarray):
                    self.data_json[k] = self.data_json[k].tolist()
            json.dump(self.data_json, datafile)

        with open(self._directory + '/model.stan', 'w') as modelfile:
            modelfile.write(self.output_program)
            modelfile.write(self.model_string)
            try:
                if self.config["stan"]["quants"] is True:
                    modelfile.write(self.add_quants())
            except Exception:
                # the config may lack the stan/quants keys; skip quantities
                pass
Example #30
def parse_query(query_str):
    """Parses a GAE search query and returns an easily readable
    composition of Expressions.

    Args:
      query_str: a str representing the GAE search query.
    Returns:
      An Expression or ExpressionsGroup corresponding to query_str.
    """
    input_stream = antlr4.InputStream(query_str)
    lexer = queryLexer(input_stream)
    stream = antlr4.CommonTokenStream(lexer)
    parser = queryParser(stream)
    raw_query_node = parser.query()  # MATCHED: exprs_seq EOF
    if parser.getNumberOfSyntaxErrors():
        if 'distance(' in query_str:
            msg = 'Searching on GeoPoint fields is not supported yet.'
        else:
            msg = 'Failed to parse query string: "{}"'.format(query_str)
        raise InvalidRequest(msg)
    exprs_seq_node = raw_query_node.children[0]
    return _process_exprs_seq(exprs_seq_node)