def compile(source, filename):  # @ReservedAssignment
    tokens = lexer.tokenize(source)
    parser.init(tokens)
    tree = parser.program()

    def error(message, token, level='Error'):
        if token is None:  # should be avoided for good error messages
            sys.stderr.write('%s %s\n' % (level, message))
        else:
            lines = source.splitlines()
            sys.stderr.write('%s\n%s^\n' % (lines[token.line_no-1], ' ' * (token.col_no-1)))
            sys.stderr.write('%s:%d:%d: %s %s\n' % (filename, token.line_no, token.col_no, level, message))
    scopes.error = error

    print 'S-expr:', ast.stree(tree)
    print ast.ptree(tree)
    print
    code, globls = scopes.build(tree)
    for name, symbol in globls.names.items():
        print name, symbol
    print
    for line in code:
        print '\t'.join(map(str, line))
    return code, globls
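A minimal driver for the compile() above, offered as a sketch: the file name is an assumption, and lexer, parser, scopes, and ast are the project modules the function already references.

# Hypothetical driver (Python 2, matching the print statements above);
# 'demo.src' is an illustrative file name, not taken from the project.
with open('demo.src') as f:
    source = f.read()
code, globls = compile(source, 'demo.src')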
def run():
    try:
        parser.init()
        stage.init()
        graphics.init()
        theme.init()
        gameloop.start()
    except KeyboardInterrupt:
        exit()
def run():
    try:
        # Init the game
        parser.init()
        # Check for editor
        if parser.args.editor:
            os.system("/usr/share/make-snake/snake-editor/__main__.py")
            sys.exit(0)
        graphics.init()
        theme.init()
        stage.init()
        game.reset()
        # Start the game
        gameloop.start()
    except KeyboardInterrupt:
        exit()
def main():
    cmd = argparse.ArgumentParser(description="Grok/Query/Aggregate log files. Requires python2 >= 2.7")
    typ = cmd.add_mutually_exclusive_group(required=True)
    typ.add_argument('-t', '--type', metavar='TYPE', choices=logformat.TYPES,
                     help='{%s} Use built-in log type (default: apache-common)' % ', '.join(logformat.TYPES),
                     default='apache-common')
    typ.add_argument('-f', '--format', action='store',
                     help='Log format (use apache LogFormat string)')
    typ.add_argument('-C', '--config', type=argparse.FileType('r'),
                     help='httpd.conf file in which to find LogFormat string (requires -T)')
    cmd.add_argument('-T', '--ctype',
                     help='type-name for LogFormat from specified httpd.conf file (only works with -C)')
    cmd.add_argument('-j', '--processes', action='store', type=int,
                     help='Number of processes to fork for log crunching (default: smart)',
                     default=parallel.SMART)
    cmd.add_argument('-l', '--lines', action='store', type=int,
                     help='Only process LINES lines of input')
    interactive = cmd.add_mutually_exclusive_group(required=False)
    interactive.add_argument('-i', '--interactive', action='store_true',
                             help="Use line-based interactive interface")
    interactive.add_argument('-c', '--curses', action='store_true', help=argparse.SUPPRESS)
    interactive.add_argument('-q', '--query', help="The query to run")
    cmd.add_argument('-d', '--debug', action='store_true',
                     help="Turn debugging on (you don't want this)")
    cmd.add_argument('logfile', nargs='+', type=argparse.FileType('r'),
                     help="log(s) to parse/query")
    args = cmd.parse_args(sys.argv[1:])

    if args.config and not args.ctype:
        cmd.error("-C/--config option requires -T/--ctype option")
    if args.ctype and not args.config:
        cmd.error("-T/--ctype only works with -C/--config option")
    if args.config and args.ctype:
        config = args.config.read()
        args.config.close()
        m = re.search(r'^logformat[\s]+(.*)[\s]+%s' % args.ctype, config, re.I | re.M)
        if m is None:
            cmd.error("LogFormat %s not found in %s" % (args.ctype, args.config.name))
        format = m.group(1)
        if (format.startswith("'") or format.startswith('"')) and \
                (format.endswith("'") or format.endswith('"')):
            format = format[1:-1]
        args.format = format.replace(r"\'", "'").replace(r'\"', '"')

    global DEBUG
    DEBUG = args.debug
    parser.DEBUG = DEBUG
    parallel.DEBUG = DEBUG
    sqlfuncs.DEBUG = DEBUG
    parser.init()
    parallel.numprocs = args.processes
    LoGrok(args, interactive=args.interactive, curses=args.curses)
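For context, a quick demonstration of what the LogFormat-extraction regex above matches. The httpd.conf line and the "combined" nickname are stock Apache examples used as illustrative assumptions, not taken from this project.

import re

# Illustrative httpd.conf content; the format string and nickname are
# the standard Apache examples, not from this project's configuration.
config = r'LogFormat "%h %l %u %t \"%r\" %>s %b" combined'
m = re.search(r'^logformat[\s]+(.*)[\s]+%s' % 'combined', config, re.I | re.M)
print(m.group(1))  # "%h %l %u %t \"%r\" %>s %b"  (quotes are stripped afterwards)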
def main():
    arg_parser = parser.init()
    # Execute the parse_args() method
    args = arg_parser.parse_args()

    input_path = args.Path
    if not os.path.exists(input_path) or not os.path.isfile(input_path):
        print('The path specified does not exist')
        sys.exit()

    words = []
    with open(input_path) as fp:
        for line in fp:
            words.append(line.strip())

    permutate(words)
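The snippet never shows this project's parser.init(); below is a minimal sketch of a compatible helper, assuming it does nothing more than build an ArgumentParser whose parsed arguments expose a Path attribute. The description text is invented.

import argparse

def init():
    # Hypothetical reconstruction -- the real helper may differ.
    arg_parser = argparse.ArgumentParser(description='Permutate words from a wordlist file')
    arg_parser.add_argument('Path', help='path to the input wordlist file')
    return arg_parser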
tokens = lexer.tokenize('''
hello;
a + b-c*2; /* comment */
}
def foo(a:int, b:int):int {
    return a + b;
}
7*8;
if 1 { // comment
    hello;}
''')
parser.init(tokens)
tree = parser.program()
print ast.ptree(tree)
import os
from pathlib import Path

from jinja2 import Template

import parser
from utils import write_to_file
from utils import mkdir_p

parser.init()

# parse and assign to vars
spec = parser.spec


def _concat(slice: str) -> str:
    """helper to concatenate each template slice."""
    return "{}\n".format(slice)


def slices_filename_content_hash() -> dict:
    """create a dict of filename: content for slices"""
    docker_slices = {}
    path = Path.cwd().joinpath(
        os.path.join(os.path.dirname(os.path.realpath(__file__)), "slices"))
    for file in path.iterdir():
        docker_slices[file.name] = file.read_text()
    return docker_slices


def concat_slices(component: str = "tensorflow", flavor: str = "mkl") -> str:
from bottle import Bottle, request

from esql.utility.configure import Environment

# create wsgi app
app = application = Bottle()

# create esql environment
env = Environment()

from parser import init, Processor

init(env.config['parser']['optimize'], env.config['parser']['debug'])


@app.route('/es', method=('GET', 'POST'))
def execute():
    """ Execute Sql in ES """
    request_data = request.forms if request.method == 'POST' else request.query
    sql = request_data.get('sql')
    return Processor.execute(sql)
    # print(Processor.execute('create table table_name.info (a string, b integer);'))
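The module exposes both app and application but never starts a server itself; below is a minimal sketch of serving it with Bottle's built-in development server. Host and port are illustrative assumptions.

if __name__ == '__main__':
    # Development only; in production, hand `application` to a WSGI
    # server such as gunicorn or uWSGI instead.
    app.run(host='localhost', port=8080)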
import os
import sys

import cson
import pathlib
from collections import OrderedDict

esql_parser = os.path.realpath(
    os.path.join(__file__, '..', '..', 'libs', 'EsqlParser'))
sys.path.insert(0, esql_parser)

from parser import init
from ql.parse.ASTNode import Node
from esql import Processor
from esql.utility.configure import load_cson

init(False, False)

tests_data_path = os.path.realpath(os.path.join(__file__, '..', 'data'))


def _dict(self):
    """ Generate serializable dict for unit test """
    from ql.parse.parser import TK
    name = self.type.name
    ret = OrderedDict({'type': name[4:] if name.startswith('TOK_') else name})
    if self.value and self.type not in [TK.TOK_DOT, TK.TOK_KEY_VALUE]:
        ret['value'] = self.value
    if self.children:
        # ret['children'] = []
import parser
import os

# # parser.init(csv_file_name = "persons.csv",config_file_name="test")
# # parser.parserFile(doc_id = "ID0000", clinical_note_file_name = "/Users/yuanpan/Documents/NLP_project/input/mttest/mtsamples-type-3-sample-343.txt")
# # parser.parserFile(doc_id = "ID0001",clinical_note_file_name = "/Users/yuanpan/Documents/NLP_project/input/mttest/mtsamples-type-3-sample-344.txt")
# # parser.processDocument(docID = "djks",content = "jdkfal")
#
parser.init(csv_file_name="persons.csv", config_file_name="test_drugs.yaml")
parser.parserFile(
    doc_id="41-2280",
    clinical_note_file_name="/Users/yuanpan/Documents/NLP_project/input/mttest/mtsamples-type-41-sample-2280.txt"
)
parser.parserFile(
    doc_id="91-1439",
    clinical_note_file_name="/Users/yuanpan/Documents/NLP_project/input/mttest/mtsamples-type-91-sample-1439.txt"
)
parser.parserFile(
    doc_id="95-520",
    clinical_note_file_name="/Users/yuanpan/Documents/NLP_project/input/mttest/mtsamples-type-95-sample-520.txt"
)

# config_directory = '/Users/yuanpan/Documents/NLP_project/input/mtsamples/'
# for filename in os.listdir(config_directory):
#     # try:
#     if filename.endswith(".txt"):
#         print(filename)
#         parser.parserFile(doc_id = filename, clinical_note_file_name = config_directory + "/" + filename)
#     # except:
def parse(tokenized_list, xml_format=False):
    parser.init(tokenized_list)
    parser.parse()
    if xml_format:
        # return the XML representation instead of the parsed code
        return parser.get_xml_format()
    return parser.get_parsed_code()
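A hedged usage sketch: the token list below is purely illustrative, since the shape of tokenized_list depends on whichever tokenizer feeds this parser.

tokens = ['class', 'Main', '{', '}']   # hypothetical token stream
code = parse(tokens)                   # parsed code (default path)
xml = parse(tokens, xml_format=True)   # XML representation instead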