def main(argv):
  # type: (List[str]) -> int
  """Parse a whole shell program from stdin and pretty-print its AST.

  Returns:
    0 on success, 2 on parse error.
  """
  arena = alloc.Arena()
  arena.PushSource(source.Stdin(''))

  parse_opts = parse_lib.OilParseOptions()
  # Dummy value; not respecting aliases!
  aliases = {}  # type: Dict[str, Any]
  # parse `` and a[x+1]=bar differently

  loader = pyutil.GetResourceLoader()
  oil_grammar = meta.LoadOilGrammar(loader)

  parse_ctx = parse_lib.ParseContext(arena, parse_opts, aliases, oil_grammar,
                                     one_pass_parse=True)
  line_reader = reader.FileLineReader(mylib.Stdin(), arena)
  c_parser = parse_ctx.MakeOshParser(line_reader)

  try:
    node = main_loop.ParseWholeFile(c_parser)
  except util.ParseError as e:
    # Show the error with source location info, then signal failure.
    ui.PrettyPrintError(e, arena)
    return 2
  assert node is not None

  tree = node.AbbreviatedTree()
  #tree = node.PrettyTree()

  # Write the AST to stdout, with color if it's a console.
  ast_f = fmt.DetectConsoleOutput(mylib.Stdout())
  fmt.PrintTree(tree, ast_f)
  ast_f.write('\n')

  return 0
def InitCommandParser(code_str, arena=None):
  """Return an OSH command parser over code_str, for use in tests.

  A default test arena is created when none is supplied; aliases are empty
  and no Oil grammar is loaded.
  """
  arena = arena or MakeArena('<test_lib>')
  opts = parse_lib.OilParseOptions()
  ctx = parse_lib.ParseContext(arena, opts, {}, None)  # no aliases, no grammar
  line_reader, _ = InitLexer(code_str, arena)
  return ctx.MakeOshParser(line_reader)
def testNoInfiniteLoop(self):
  """Regression test: completion on this input used to loop forever."""
  # This was ONE place where we got an infinite loop.
  with open('testdata/completion/return-124.bash') as f:
    code_str = f.read()
  trail = parse_lib.Trail()
  arena = test_lib.MakeArena('<completion_test.py>')
  parse_opts = parse_lib.OilParseOptions()
  parse_ctx = parse_lib.ParseContext(arena, parse_opts, {}, None, trail=trail)
  comp_lookup = completion.Lookup()
  # Run the fixture script so its 'complete' registrations take effect.
  ex = test_lib.EvalCode(code_str, parse_ctx, comp_lookup=comp_lookup)

  r = _MakeRootCompleter(parse_ctx=parse_ctx, comp_lookup=comp_lookup)
  m = list(r.Matches(MockApi('bad ')))
  self.assertEqual([], sorted(m))

  # Error: spec not changed
  m = list(r.Matches(MockApi('both ')))
  self.assertEqual([], sorted(m))

  # Redefines completions
  m = list(r.Matches(MockApi('both2 ')))
  self.assertEqual(['both2 b1 ', 'both2 b2 '], sorted(m))
def testRunsUserDefinedFunctions(self):
  """Exercise user-defined completion functions and their flags (-X, -P, -o)."""
  # This is here because it's hard to test readline with the spec tests.
  with open('testdata/completion/osh-unit.bash') as f:
    code_str = f.read()
  trail = parse_lib.Trail()
  arena = test_lib.MakeArena('<completion_test.py>')
  parse_opts = parse_lib.OilParseOptions()
  parse_ctx = parse_lib.ParseContext(arena, parse_opts, {}, None, trail=trail)
  comp_lookup = completion.Lookup()
  # Run the fixture script so its 'complete' registrations take effect.
  ex = test_lib.EvalCode(code_str, parse_ctx, comp_lookup=comp_lookup)
  r = _MakeRootCompleter(comp_lookup=comp_lookup)

  # By default, we get a space on the end.
  m = list(r.Matches(MockApi('mywords t')))
  self.assertEqual(['mywords three ', 'mywords two '], sorted(m))

  # No space
  m = list(r.Matches(MockApi('mywords_nospace t')))
  self.assertEqual(['mywords_nospace three', 'mywords_nospace two'],
                   sorted(m))

  # Filtered out two and bin
  m = list(r.Matches(MockApi('flagX ')))
  self.assertEqual(['flagX one ', 'flagX three '], sorted(m))

  # Filter out everything EXCEPT two and bin
  m = list(r.Matches(MockApi('flagX_bang ')))
  self.assertEqual(['flagX_bang bin ', 'flagX_bang two '], sorted(m))

  # -X with -P
  m = list(r.Matches(MockApi('flagX_prefix ')))
  self.assertEqual(['flagX_prefix __one ', 'flagX_prefix __three '],
                   sorted(m))

  # -P with plusdirs
  m = list(r.Matches(MockApi('prefix_plusdirs b')))
  self.assertEqual([
      'prefix_plusdirs __bin ', 'prefix_plusdirs benchmarks/',
      'prefix_plusdirs bin/', 'prefix_plusdirs build/'
  ], sorted(m))

  # -X with plusdirs.  We're filtering out bin/, and then it's added back by
  # plusdirs.  The filter doesn't kill it.
  m = list(r.Matches(MockApi('flagX_plusdirs b')))
  self.assertEqual([
      'flagX_plusdirs benchmarks/', 'flagX_plusdirs bin/',
      'flagX_plusdirs build/'
  ], sorted(m))

  # -P with dirnames.  -P is NOT respected.
  m = list(r.Matches(MockApi('prefix_dirnames b')))
  self.assertEqual([
      'prefix_dirnames benchmarks/', 'prefix_dirnames bin/',
      'prefix_dirnames build/'
  ], sorted(m))
def _MakeHistoryEvaluator(history_items):
  """Construct a history.Evaluator backed by a mock readline history.

  Args:
    history_items: list of history lines the mock readline will serve.
  """
  mock_readline = _MockReadlineHistory(history_items)

  test_arena = test_lib.MakeArena('<reader_test.py>')
  opts = parse_lib.OilParseOptions()
  ctx = parse_lib.ParseContext(test_arena, opts, {}, None)
  ctx.Init_Trail(parse_lib.Trail())

  return history.Evaluator(mock_readline, ctx, util.DebugFile(sys.stdout))
def MakeTestEvaluator():
  """Build a CompletionWordEvaluator wired to fresh, empty test state."""
  test_arena = MakeArena('<MakeTestEvaluator>')
  memory = state.Mem('', [], {}, test_arena)

  opts = state.ExecOpts(memory, parse_lib.OilParseOptions(), None)

  deps = cmd_exec.Deps()
  deps.splitter = split.SplitContext(memory)

  return word_eval.CompletionWordEvaluator(memory, opts, deps, test_arena)
def setUp(self):
  """Done on every test."""
  self.arena = alloc.Arena()
  self.arena.PushSource(source.Unused(''))
  parse_opts = parse_lib.OilParseOptions()
  loader = pyutil.GetResourceLoader()
  oil_grammar = meta.LoadOilGrammar(loader)
  # No aliases ({}).  NOTE(review): one_pass_parse=True presumably makes
  # sub-parses record extra info -- confirm against parse_lib.
  self.parse_ctx = parse_lib.ParseContext(self.arena, parse_opts, {},
                                          oil_grammar, one_pass_parse=True)
def InitWordParser(word_str, oil_at=False, arena=None):
  """Return a word parser over word_str, for use in tests.

  Args:
    word_str: source text to lex.
    oil_at: value for the parse_at option.
    arena: optional arena; a default test arena is created when omitted.
  """
  arena = arena or MakeArena('<test_lib>')

  opts = parse_lib.OilParseOptions()
  opts.parse_at = oil_at

  grammar = meta.LoadOilGrammar(pyutil.GetResourceLoader())
  ctx = parse_lib.ParseContext(arena, opts, {}, grammar)

  line_reader, _ = InitLexer(word_str, arena)
  osh_parser = ctx.MakeOshParser(line_reader)
  # Hack: reach inside the command parser for its word parser.
  return osh_parser.w_parser
def _Detect(test, word_str, expected):
  """Parse word_str, run DetectShAssignment on it, and check the result.

  Args:
    test: the unittest.TestCase to assert against.
    word_str: shell word source text.
    expected: (left_id, close_id, part_offset) triple, with None meaning
      "expect no token".
  """
  # TODO: This function could be moved to test_lib.
  log('-'*80)
  w = word_parse_test._assertReadWord(test, word_str)

  actual = word_.DetectShAssignment(w)
  left_token, close_token, part_offset = actual
  expected_left, expected_close, expected_part_offset = expected

  print(left_token, close_token, part_offset)
  print()

  if expected_left is None:
    test.assertEqual(None, left_token)
  else:
    test.assertEqual(expected_left, left_token.id)

  if expected_close is None:
    test.assertEqual(None, close_token)
  else:
    # BUG FIX: this branch previously re-checked expected_left against
    # left_token.id (copy-paste error), so the close token was never verified.
    test.assertEqual(expected_close, close_token.id)

  test.assertEqual(expected_part_offset, part_offset)

  arena = test_lib.MakeArena('word_test.py')
  parse_opts = parse_lib.OilParseOptions()
  parse_ctx = parse_lib.ParseContext(arena, parse_opts, {}, None)

  # For assignment-like words, also exercise the env/assign-pair helpers.
  if left_token and left_token.id in (Id.Lit_VarLike, Id.Lit_ArrayLhsOpen):
    more_env = []
    preparsed = (left_token, close_token, part_offset, w)
    try:
      cmd_parse._AppendMoreEnv([preparsed], more_env)
    except Exception as e:
      log('Error: %s', e)
    else:
      log('more_env: %s', more_env)

    try:
      assign_pair = cmd_parse._MakeAssignPair(parse_ctx, preparsed)
    except Exception as e:
      log('Error: %s', e)
    else:
      log('assign_pair: %s', assign_pair)
def ParseAndEval(code_str):
  """Parse code_str as an arithmetic expression and evaluate it to an int."""
  arena = test_lib.MakeArena('<arith_parse_test.py>')
  w_parser = test_lib.InitWordParser(code_str, arena=arena)
  w_parser._Next(lex_mode_e.Arith)  # Calling private method
  anode = w_parser._ReadArithExpr()  # need the right lex state?
  print('node:', anode)

  # Minimal evaluator state: empty shell memory plus default options.
  mem = state.Mem('', [], {}, arena)
  parse_opts = parse_lib.OilParseOptions()
  exec_opts = state.ExecOpts(mem, parse_opts, None)
  exec_deps = cmd_exec.Deps()
  splitter = split.SplitContext(mem)
  exec_deps.splitter = splitter

  ev = word_eval.CompletionWordEvaluator(mem, exec_opts, exec_deps, arena)
  arith_ev = expr_eval.ArithEvaluator(mem, exec_opts, ev, arena)
  return arith_ev.EvalToInt(anode)
def testCompletesAliases(self):
  """Completion specs should follow alias expansion back to the command."""
  # I put some aliases in this file.
  with open('testdata/completion/osh-unit.bash') as f:
    code_str = f.read()
  trail = parse_lib.Trail()
  arena = test_lib.MakeArena('<completion_test.py>')
  parse_opts = parse_lib.OilParseOptions()
  # The same aliases dict is shared by the parser and the evaluated script.
  aliases = {}
  parse_ctx = parse_lib.ParseContext(arena, parse_opts, aliases, None,
                                     trail=trail)
  comp_lookup = completion.Lookup()
  ex = test_lib.EvalCode(code_str, parse_ctx, comp_lookup=comp_lookup,
                         aliases=aliases)
  r = _MakeRootCompleter(parse_ctx=parse_ctx, comp_lookup=comp_lookup)

  # The original command
  m = list(r.Matches(MockApi('ls ')))
  self.assertEqual(['ls one ', 'ls two '], sorted(m))

  # Alias for the command
  m = list(r.Matches(MockApi('ll ')))
  self.assertEqual(['ll one ', 'll two '], sorted(m))

  # DOUBLE alias expansion goes back to original
  m = list(r.Matches(MockApi('ll_classify ')))
  self.assertEqual(['ll_classify one ', 'll_classify two '], sorted(m))

  # Trailing space
  m = list(r.Matches(MockApi('ll_trailing ')))
  self.assertEqual(['ll_trailing one ', 'll_trailing two '], sorted(m))

  # It should NOT clobber completion registered for aliases
  m = list(r.Matches(MockApi('ll_own_completion ')))
  self.assertEqual(
      ['ll_own_completion own ', 'll_own_completion words '], sorted(m))
def _MakeRootCompleter(parse_ctx=None, comp_lookup=None):
  """Build a RootCompleter for tests, with defaults for anything not given.

  Args:
    parse_ctx: optional ParseContext; a one-pass-parse context with a Trail
      is created when omitted.
    comp_lookup: optional completion.Lookup; an empty one is created when
      omitted.
  """
  #comp_state = comp_state or completion.State()
  compopt_state = completion.OptionState()
  comp_ui_state = comp_ui.State()
  comp_lookup = comp_lookup or completion.Lookup()
  ev = test_lib.MakeTestEvaluator()
  if not parse_ctx:
    arena = alloc.Arena()
    arena.PushSource(source.MainFile('<_MakeRootCompleter>'))
    parse_opts = parse_lib.OilParseOptions()
    parse_ctx = parse_lib.ParseContext(arena, parse_opts, {}, None)
    parse_ctx.Init_Trail(parse_lib.Trail())
    parse_ctx.Init_OnePassParse(True)
  if 1:  # enable for details
    debug_f = util.DebugFile(sys.stdout)
  else:
    debug_f = util.NullDebugFile()
  # NOTE(review): `mem` is not defined in this function -- presumably a
  # module-level object in the enclosing test file; confirm before moving.
  return completion.RootCompleter(ev, mem, comp_lookup, compopt_state,
                                  comp_ui_state, parse_ctx, debug_f)
def main(argv):
  """Grammar tool entry point: marshal a grammar, parse with it, or demo.

  Actions:
    marshal: compile a .pgen2 grammar to .marshal and _nt.py outputs.
    parse: parse code_str with the grammar and print the tree / AST.
    stdlib-test: show Python's own (deep) parse tree for comparison.
  """
  action = argv[1]
  argv = argv[2:]

  # Common initialization: map arithmetic operator strings to token IDs.
  arith_ops = {}
  for _, token_str, id_ in meta.ID_SPEC.LexerPairs(Kind.Arith):
    arith_ops[token_str] = id_

  if 0:  # debugging aid: dump the operator table
    from pprint import pprint
    pprint(arith_ops)

  tok_def = OilTokenDef(arith_ops)

  if action == 'marshal':  # generate the grammar and parse it
    grammar_path = argv[0]
    out_dir = argv[1]

    basename, _ = os.path.splitext(os.path.basename(grammar_path))

    # HACK for find:
    if basename == 'find':
      from tools.find import tokenizer as find_tokenizer
      tok_def = find_tokenizer.TokenDef()

    with open(grammar_path) as f:
      gr = pgen.MakeGrammar(f, tok_def=tok_def)

    marshal_path = os.path.join(out_dir, basename + '.marshal')
    with open(marshal_path, 'wb') as out_f:
      gr.dump(out_f)

    nonterm_path = os.path.join(out_dir, basename + '_nt.py')
    with open(nonterm_path, 'w') as out_f:
      gr.dump_nonterminals(out_f)

    log('Compiled %s -> %s and %s', grammar_path, marshal_path, nonterm_path)
    #gr.report()

  elif action == 'parse':  # generate the grammar and parse it
    # Remove build dependency
    from frontend import parse_lib
    from oil_lang import expr_parse

    grammar_path = argv[0]
    start_symbol = argv[1]
    code_str = argv[2]

    # For choosing lexer and semantic actions
    grammar_name, _ = os.path.splitext(os.path.basename(grammar_path))

    with open(grammar_path) as f:
      gr = pgen.MakeGrammar(f, tok_def=tok_def)

    arena = alloc.Arena()
    lex = MakeOilLexer(code_str, arena)

    is_expr = grammar_name in ('calc', 'grammar')

    parse_opts = parse_lib.OilParseOptions()
    parse_ctx = parse_lib.ParseContext(arena, parse_opts, {}, gr)
    p = expr_parse.ExprParser(parse_ctx, gr)
    try:
      pnode, _ = p.Parse(lex, gr.symbol2number[start_symbol])
    except parse.ParseError as e:
      log('Parse Error: %s', e)
      return 1

    names = parse_lib.MakeGrammarNames(gr)
    p_printer = expr_parse.ParseTreePrinter(names)  # print raw nodes
    p_printer.Print(pnode)

    if is_expr:
      from oil_lang import expr_to_ast
      tr = expr_to_ast.Transformer(gr)
      # Transform the raw parse tree into an AST, by start symbol.
      if start_symbol == 'eval_input':
        ast_node = tr.Expr(pnode)
      else:
        ast_node = tr.OilAssign(pnode)
      ast_node.PrettyPrint()
      print()

  elif action == 'stdlib-test':
    # This shows how deep Python's parse tree is.  It doesn't use semantic
    # actions to prune on the fly!
    import parser  # builtin module
    t = parser.expr('1+2')
    print(t)
    t2 = parser.st2tuple(t)
    print(t2)

  else:
    raise RuntimeError('Invalid action %r' % action)
def InitExecutor(parse_ctx=None, comp_lookup=None, arena=None, mem=None,
                 aliases=None, ext_prog=None):
  """Build a cmd_exec.Executor with test-friendly defaults.

  Args:
    parse_ctx: optional ParseContext; its arena/options win when given.
    comp_lookup: optional completion.Lookup.
    arena, mem, aliases, ext_prog: optional pieces of state; defaults are
      created for any that are omitted.

  Returns:
    A fully wired Executor (with completion builtins registered).
  """
  if parse_ctx:
    arena = parse_ctx.arena
    parse_opts = parse_ctx.parse_opts
  else:
    # BUG FIX: the result of `arena or MakeArena(...)` was previously
    # discarded, leaving arena as None when no arena was passed in.
    arena = arena or MakeArena('<InitExecutor>')
    parse_opts = parse_lib.OilParseOptions()
    parse_ctx = parse_lib.ParseContext(arena, parse_opts, {}, None)

  mem = mem or state.Mem('', [], {}, arena)
  exec_opts = state.ExecOpts(mem, parse_opts, None)
  errfmt = ui.ErrorFormatter(arena)
  job_state = process.JobState()
  fd_state = process.FdState(errfmt, job_state)
  funcs = {}
  aliases = {} if aliases is None else aliases

  compopt_state = completion.OptionState()
  comp_lookup = comp_lookup or completion.Lookup()

  readline = None  # simulate not having it
  new_var = builtin_assign.NewVar(mem, funcs, errfmt)
  builtins = {  # Lookup
      builtin_e.ECHO: builtin_pure.Echo(exec_opts),
      builtin_e.SHIFT: builtin_assign.Shift(mem),

      builtin_e.HISTORY: builtin.History(readline),

      builtin_e.COMPOPT: builtin_comp.CompOpt(compopt_state, errfmt),
      builtin_e.COMPADJUST: builtin_comp.CompAdjust(mem),

      builtin_e.ALIAS: builtin_pure.Alias(aliases, errfmt),
      builtin_e.UNALIAS: builtin_pure.UnAlias(aliases, errfmt),

      builtin_e.DECLARE: new_var,
      builtin_e.TYPESET: new_var,
      builtin_e.LOCAL: new_var,

      builtin_e.EXPORT: builtin_assign.Export(mem, errfmt),
      builtin_e.READONLY: builtin_assign.Readonly(mem, errfmt),
  }

  # For the tests, we do not use 'readline'.
  # NOTE(review): this re-creates exec_opts, so the Echo builtin above holds
  # a DIFFERENT ExecOpts instance than the executor; kept as-is to preserve
  # existing behavior.
  exec_opts = state.ExecOpts(mem, parse_opts, None)

  debug_f = util.DebugFile(sys.stderr)
  exec_deps = cmd_exec.Deps()
  exec_deps.search_path = state.SearchPath(mem)
  exec_deps.errfmt = errfmt
  exec_deps.trap_nodes = []
  exec_deps.job_state = job_state
  exec_deps.waiter = process.Waiter(exec_deps.job_state, exec_opts)
  exec_deps.ext_prog = \
      ext_prog or process.ExternalProgram('', fd_state,
                                          exec_deps.search_path, errfmt,
                                          debug_f)
  exec_deps.dumper = dev.CrashDumper('')
  exec_deps.debug_f = debug_f
  exec_deps.trace_f = debug_f

  splitter = split.SplitContext(mem)
  exec_deps.splitter = splitter

  word_ev = word_eval.NormalWordEvaluator(mem, exec_opts, exec_deps, arena)
  exec_deps.word_ev = word_ev

  arith_ev = expr_eval.ArithEvaluator(mem, exec_opts, word_ev, arena)
  exec_deps.arith_ev = arith_ev
  word_ev.arith_ev = arith_ev  # Circular

  bool_ev = expr_eval.BoolEvaluator(mem, exec_opts, word_ev, arena)
  exec_deps.bool_ev = bool_ev

  tracer = dev.Tracer(parse_ctx, exec_opts, mem, word_ev, debug_f)
  exec_deps.tracer = tracer

  ex = cmd_exec.Executor(mem, fd_state, funcs, builtins, exec_opts,
                         parse_ctx, exec_deps)

  spec_builder = builtin_comp.SpecBuilder(ex, parse_ctx, word_ev, splitter,
                                          comp_lookup)
  # Add some builtins that depend on the executor!
  complete_builtin = builtin_comp.Complete(spec_builder, comp_lookup)
  builtins[builtin_e.COMPLETE] = complete_builtin
  builtins[builtin_e.COMPGEN] = builtin_comp.CompGen(spec_builder)

  return ex
def testMatchesOracle(self):
  """Compare OSH's completion-init data against recorded bash oracle cases."""
  for i, case in enumerate(bash_oracle.CASES):  # generated data
    flags = case.get('_init_completion_flags')
    if flags is None:
      continue

    # This was input
    code_str = case['code']
    assert code_str.endswith('\t')

    log('')
    log('--- Case %d: %r with flags %s', i, code_str, flags)
    log('')
    #print(case)

    oracle_comp_words = case['COMP_WORDS']
    oracle_comp_cword = case['COMP_CWORD']
    oracle_comp_line = case['COMP_LINE']
    oracle_comp_point = case['COMP_POINT']

    # Init completion data
    oracle_words = case['words']
    oracle_cur = case['cur']
    oracle_prev = case['prev']
    oracle_cword = case['cword']
    oracle_split = case['split']

    #
    # First test some invariants on the oracle's data.
    #

    self.assertEqual(code_str[:-1], oracle_comp_line)
    # weird invariant that always holds.  So isn't COMP_CWORD useless?
    self.assertEqual(int(oracle_comp_cword), len(oracle_comp_words) - 1)
    # Another weird invariant.  Note this is from the bash ORACLE, not from
    # our mocks.
    self.assertEqual(int(oracle_comp_point), len(code_str) - 1)

    #
    # Now run a piece of code that compares OSH's actual data against the
    # oracle.
    #

    init_code = _INIT_TEMPLATE % {
        'flags': ' '.join(flags),
        'command': oracle_comp_words[0]
    }

    arena = test_lib.MakeArena('<InitCompletionTest>')
    parse_opts = parse_lib.OilParseOptions()
    parse_ctx = parse_lib.ParseContext(arena, parse_opts, {}, None)
    mem = state.Mem('', [], {}, arena)

    #
    # Allow our code to access oracle data
    #
    state.SetGlobalArray(mem, 'ORACLE_COMP_WORDS', oracle_comp_words)
    state.SetGlobalString(mem, 'ORACLE_COMP_CWORD', oracle_comp_cword)
    state.SetGlobalString(mem, 'ORACLE_COMP_LINE', oracle_comp_line)
    state.SetGlobalString(mem, 'ORACLE_COMP_POINT', oracle_comp_point)

    state.SetGlobalArray(mem, 'ORACLE_words', oracle_words)
    state.SetGlobalString(mem, 'ORACLE_cur', oracle_cur)
    state.SetGlobalString(mem, 'ORACLE_prev', oracle_prev)
    state.SetGlobalString(mem, 'ORACLE_cword', oracle_cword)
    state.SetGlobalString(mem, 'ORACLE_split', oracle_split)

    comp_lookup = completion.Lookup()
    ex = test_lib.EvalCode(init_code, parse_ctx, comp_lookup=comp_lookup,
                           mem=mem)

    r = _MakeRootCompleter(comp_lookup=comp_lookup)
    # Complete on the input minus its trailing TAB character.
    comp = MockApi(code_str[:-1])
    m = list(r.Matches(comp))
    log('matches = %s', m)

    # Unterminated quote in case 5.  Nothing to complete.
    # TODO: use a label
    if i == 5:
      continue

    # Our test shell script records what passed in an array.
    val = mem.GetVar('PASSED')
    self.assertEqual(value_e.MaybeStrArray, val.tag,
                     "Expected array, got %s" % val)
    actually_passed = val.strs

    should_pass = [
        'COMP_WORDS', 'COMP_CWORD', 'COMP_LINE', 'COMP_POINT',  # old API
        'words', 'cur', 'prev', 'cword', 'split'  # new API
    ]

    if i == 4:
      should_pass.remove('COMP_WORDS')
      should_pass.remove('COMP_CWORD')
      should_pass.remove('cword')
      should_pass.remove('words')  # double quotes aren't the same

    for t in should_pass:
      self.assert_(
          t in actually_passed,
          "%r was expected to pass (case %d)" % (t, i))

  log('Ran %d cases', len(bash_oracle.CASES))
def main(argv):
  """Grammar tool entry point: marshal a grammar, parse with it, or demo.

  Actions:
    marshal: compile a .pgen2 grammar to .marshal and _nt.py outputs.
    parse: parse code_str with the grammar and print the tree / AST.
    stdlib-test: show Python's own (deep) parse tree for comparison.
  """
  action = argv[1]
  argv = argv[2:]

  # Used at grammar BUILD time.
  OPS = {
      '.': Id.Expr_Dot,
      '->': Id.Expr_RArrow,
      '::': Id.Expr_DColon,

      '@': Id.Expr_At,
      '...': Id.Expr_Ellipsis,

      '$': Id.Expr_Dollar,  # Only for legacy eggex /d+$/
  }

  # Note: We have two lists of ops because Id.Op_Semi is used, not
  # Id.Arith_Semi.
  for _, token_str, id_ in lex.EXPR_OPS:
    assert token_str not in OPS, token_str
    OPS[token_str] = id_

  # Tokens that look like / or ${ or @{
  triples = (
      meta.ID_SPEC.LexerPairs(Kind.Arith) +
      lex.OIL_LEFT_SUBS +
      lex.OIL_LEFT_UNQUOTED +
      lex.EXPR_WORDS
  )
  more_ops = {}
  for _, token_str, id_ in triples:
    assert token_str not in more_ops, token_str
    more_ops[token_str] = id_

  # Tokens that look like 'for'
  keyword_ops = {}
  for _, token_str, id_ in lex.EXPR_WORDS:  # for, in, etc.
    assert token_str not in keyword_ops, token_str
    keyword_ops[token_str] = id_

  if 0:  # debugging aid: dump the operator tables
    from pprint import pprint
    pprint(OPS)
    print('---')
    pprint(more_ops)
    print('---')
    pprint(keyword_ops)
    print('---')

  tok_def = OilTokenDef(OPS, more_ops, keyword_ops)

  if action == 'marshal':  # generate the grammar and parse it
    grammar_path = argv[0]
    out_dir = argv[1]

    basename, _ = os.path.splitext(os.path.basename(grammar_path))

    # HACK for find:
    if basename == 'find':
      from tools.find import tokenizer as find_tokenizer
      tok_def = find_tokenizer.TokenDef()

    with open(grammar_path) as f:
      gr = pgen.MakeGrammar(f, tok_def=tok_def)

    marshal_path = os.path.join(out_dir, basename + '.marshal')
    with open(marshal_path, 'wb') as out_f:
      gr.dump(out_f)

    nonterm_path = os.path.join(out_dir, basename + '_nt.py')
    with open(nonterm_path, 'w') as out_f:
      gr.dump_nonterminals(out_f)

    log('Compiled %s -> %s and %s', grammar_path, marshal_path, nonterm_path)
    #gr.report()

  elif action == 'parse':  # generate the grammar and parse it
    # Remove build dependency
    from frontend import parse_lib
    from oil_lang import expr_parse

    grammar_path = argv[0]
    start_symbol = argv[1]
    code_str = argv[2]

    # For choosing lexer and semantic actions
    grammar_name, _ = os.path.splitext(os.path.basename(grammar_path))

    with open(grammar_path) as f:
      gr = pgen.MakeGrammar(f, tok_def=tok_def)

    arena = alloc.Arena()
    lex_ = MakeOilLexer(code_str, arena)

    is_expr = grammar_name in ('calc', 'grammar')

    parse_opts = parse_lib.OilParseOptions()
    parse_ctx = parse_lib.ParseContext(arena, parse_opts, {}, gr)
    p = expr_parse.ExprParser(parse_ctx, gr)
    try:
      pnode, _ = p.Parse(lex_, gr.symbol2number[start_symbol])
    except parse.ParseError as e:
      log('Parse Error: %s', e)
      return 1

    names = parse_lib.MakeGrammarNames(gr)
    p_printer = expr_parse.ParseTreePrinter(names)  # print raw nodes
    p_printer.Print(pnode)

    if is_expr:
      from oil_lang import expr_to_ast
      tr = expr_to_ast.Transformer(gr)
      # Transform the raw parse tree into an AST, by start symbol.
      if start_symbol == 'eval_input':
        ast_node = tr.Expr(pnode)
      else:
        ast_node = tr.VarDecl(pnode)
      ast_node.PrettyPrint()
      print()

  elif action == 'stdlib-test':
    # This shows how deep Python's parse tree is.  It doesn't use semantic
    # actions to prune on the fly!
    import parser  # builtin module
    t = parser.expr('1+2')
    print(t)
    t2 = parser.st2tuple(t)
    print(t2)

  else:
    raise RuntimeError('Invalid action %r' % action)
def main(argv):
  # type: (List[str]) -> int
  """Parse a shell program (stdin, a file, or -c/-n) and print the AST.

  Returns:
    0 on success, 2 on parse error.
  """
  arena = alloc.Arena()

  parse_opts = parse_lib.OilParseOptions()
  # Dummy value; not respecting aliases!
  aliases = {}  # type: Dict[str, str]
  # parse `` and a[x+1]=bar differently

  oil_grammar = None  # type: Grammar
  if mylib.PYTHON:
    # The grammar is only loadable from the Python build, not the C++ one.
    loader = pyutil.GetResourceLoader()
    oil_grammar = meta.LoadOilGrammar(loader)

  parse_ctx = parse_lib.ParseContext(arena, parse_opts, aliases, oil_grammar)

  pretty_print = True

  # Choose the input source from the command line.
  if len(argv) == 1:
    line_reader = reader.FileLineReader(mylib.Stdin(), arena)
    src = source.Stdin('')  # type: source_t

  elif len(argv) == 2:
    path = argv[1]
    f = mylib.open(path)
    line_reader = reader.FileLineReader(f, arena)
    src = source.MainFile(path)

  elif len(argv) == 3:
    if argv[1] == '-c':
      # This path is easier to run through GDB
      line_reader = reader.StringLineReader(argv[2], arena)
      src = source.CFlag()
    elif argv[1] == '-n':  # For benchmarking, allow osh_parse -n file.txt
      path = argv[2]
      f = mylib.open(path)
      line_reader = reader.FileLineReader(f, arena)
      src = source.MainFile(path)
      # This is like --ast-format none, which benchmarks/osh-helper.sh passes.
      pretty_print = False
    else:
      raise AssertionError()

  else:
    raise AssertionError()

  arena.PushSource(src)

  c_parser = parse_ctx.MakeOshParser(line_reader)

  try:
    #node = main_loop.ParseWholeFile(c_parser)
    node = ParseWholeFile(c_parser)
  except error.Parse as e:
    ui.PrettyPrintError(e, arena)
    return 2
  assert node is not None

  # C++ doesn't have the abbreviations yet (though there are some differences
  # like omitting spids)
  #tree = node.AbbreviatedTree()
  if pretty_print:
    tree = node.PrettyTree()

    ast_f = fmt.DetectConsoleOutput(mylib.Stdout())
    fmt.PrintTree(tree, ast_f)
    ast_f.write('\n')

  return 0
def OshCommandMain(argv):
  """Run an 'oshc' tool.

  'osh' is short for "osh compiler" or "osh command".

  TODO:
  - oshc --help

  oshc deps
    --path: the $PATH to use to find executables.  What about libraries?

    NOTE: we're leaving out su -c, find, xargs, etc.?  Those should generally
    run functions using the $0 pattern.
    --chained-command sudo
  """
  try:
    action = argv[0]
  except IndexError:
    raise args.UsageError('Missing required subcommand.')

  if action not in SUBCOMMANDS:
    raise args.UsageError('Invalid subcommand %r.' % action)

  arena = alloc.Arena()
  # Read from the script file when given, otherwise from stdin.
  try:
    script_name = argv[1]
    arena.PushSource(source.MainFile(script_name))
  except IndexError:
    arena.PushSource(source.Stdin())
    f = sys.stdin
  else:
    try:
      f = open(script_name)
    except IOError as e:
      ui.Stderr("oshc: Couldn't open %r: %s", script_name,
                posix.strerror(e.errno))
      return 2

  aliases = {}  # Dummy value; not respecting aliases!

  loader = pyutil.GetResourceLoader()
  oil_grammar = meta.LoadOilGrammar(loader)

  parse_opts = parse_lib.OilParseOptions()
  # parse `` and a[x+1]=bar differently
  parse_ctx = parse_lib.ParseContext(arena, parse_opts, aliases, oil_grammar,
                                     one_pass_parse=True)

  line_reader = reader.FileLineReader(f, arena)
  c_parser = parse_ctx.MakeOshParser(line_reader)

  try:
    node = main_loop.ParseWholeFile(c_parser)
  except util.ParseError as e:
    ui.PrettyPrintError(e, arena)
    return 2
  assert node is not None

  f.close()

  # Columns for list-*
  # path line name
  # where name is the binary path, variable name, or library path.

  # bin-deps and lib-deps can be used to make an app bundle.
  # Maybe I should list them together?  'deps' can show 4 columns?
  #
  # path, line, type, name
  #
  # --pretty can show the LST location.

  # stderr: show how we're following imports?

  if action == 'translate':
    osh2oil.PrintAsOil(arena, node)

  elif action == 'arena':  # for debugging
    osh2oil.PrintArena(arena)

  elif action == 'spans':  # for debugging
    osh2oil.PrintSpans(arena)

  elif action == 'format':
    # TODO: autoformat code
    raise NotImplementedError(action)

  elif action == 'deps':
    deps.Deps(node)

  elif action == 'undefined-vars':  # could be environment variables
    raise NotImplementedError

  else:
    raise AssertionError  # Checked above

  return 0
def ShellMain(lang, argv0, argv, login_shell): """Used by bin/osh and bin/oil. Args: lang: 'osh' or 'oil' argv0, argv: So we can also invoke bin/osh as 'oil.ovm osh'. Like busybox. login_shell: Was - on the front? """ # Differences between osh and oil: # - --help? I guess Oil has a SUPERSET of OSH options. # - oshrc vs oilrc # - the parser and executor # - Change the prompt in the interactive shell? assert lang in ('osh', 'oil'), lang arg_r = args.Reader(argv) try: opts = OSH_SPEC.Parse(arg_r) except args.UsageError as e: ui.Stderr('osh usage error: %s', e.msg) return 2 # NOTE: This has a side effect of deleting _OVM_* from the environment! # TODO: Thread this throughout the program, and get rid of the global # variable in core/util.py. Rename to InitResourceLaoder(). It's now only # used for the 'help' builtin and --version. loader = pyutil.GetResourceLoader() if opts.help: builtin.Help(['%s-usage' % lang], loader) return 0 if opts.version: # OSH version is the only binary in Oil right now, so it's all one version. _ShowVersion() return 0 if arg_r.AtEnd(): dollar0 = argv0 has_main = False else: dollar0 = arg_r.Peek() # the script name, or the arg after -c has_main = True arena = alloc.Arena() errfmt = ui.ErrorFormatter(arena) # NOTE: has_main is only for ${BASH_SOURCE[@} and family. Could be a # required arg. 
mem = state.Mem(dollar0, argv[arg_r.i + 1:], posix.environ, arena, has_main=has_main) builtin_funcs.Init(mem) procs = {} job_state = process.JobState() fd_state = process.FdState(errfmt, job_state) parse_opts = parse_lib.OilParseOptions() exec_opts = state.ExecOpts(mem, parse_opts, line_input) if opts.show_options: # special case: sh -o exec_opts.ShowOptions([]) return 0 builtin_pure.SetExecOpts(exec_opts, opts.opt_changes, opts.shopt_changes) aliases = {} # feedback between runtime and parser oil_grammar = meta.LoadOilGrammar(loader) if opts.one_pass_parse and not exec_opts.noexec: raise args.UsageError('--one-pass-parse requires noexec (-n)') parse_ctx = parse_lib.ParseContext(arena, parse_opts, aliases, oil_grammar, one_pass_parse=opts.one_pass_parse) # Three ParseContext instances SHARE aliases. comp_arena = alloc.Arena() comp_arena.PushSource(source.Unused('completion')) trail1 = parse_lib.Trail() # one_pass_parse needs to be turned on to complete inside backticks. TODO: # fix the issue where ` gets erased because it's not part of # set_completer_delims(). comp_ctx = parse_lib.ParseContext(comp_arena, parse_opts, aliases, oil_grammar, trail=trail1, one_pass_parse=True) hist_arena = alloc.Arena() hist_arena.PushSource(source.Unused('history')) trail2 = parse_lib.Trail() hist_ctx = parse_lib.ParseContext(hist_arena, parse_opts, aliases, oil_grammar, trail=trail2) # Deps helps manages dependencies. These dependencies are circular: # - ex and word_ev, arith_ev -- for command sub, arith sub # - arith_ev and word_ev -- for $(( ${a} )) and $x$(( 1 )) # - ex and builtins (which execute code, like eval) # - prompt_ev needs word_ev for $PS1, which needs prompt_ev for @P exec_deps = cmd_exec.Deps() # TODO: In general, exec_deps are shared between the mutually recursive # evaluators. Some of the four below are only shared between a builtin and # the Executor, so we could put them somewhere else. 
exec_deps.traps = {} exec_deps.trap_nodes = [] # TODO: Clear on fork() to avoid duplicates exec_deps.job_state = job_state # note: exec_opts.interactive set later exec_deps.waiter = process.Waiter(job_state, exec_opts) exec_deps.errfmt = errfmt my_pid = posix.getpid() debug_path = '' debug_dir = posix.environ.get('OSH_DEBUG_DIR') if opts.debug_file: # --debug-file takes precedence over OSH_DEBUG_DIR debug_path = opts.debug_file elif debug_dir: debug_path = os_path.join(debug_dir, '%d-osh.log' % my_pid) if debug_path: # This will be created as an empty file if it doesn't exist, or it could be # a pipe. try: debug_f = util.DebugFile(fd_state.Open(debug_path, mode='w')) except OSError as e: ui.Stderr("osh: Couldn't open %r: %s", debug_path, posix.strerror(e.errno)) return 2 else: debug_f = util.NullDebugFile() exec_deps.debug_f = debug_f # Not using datetime for dependency reasons. TODO: maybe show the date at # the beginning of the log, and then only show time afterward? To save # space, and make space for microseconds. (datetime supports microseconds # but time.strftime doesn't). iso_stamp = time.strftime("%Y-%m-%d %H:%M:%S") debug_f.log('%s [%d] OSH started with argv %s', iso_stamp, my_pid, argv) if debug_path: debug_f.log('Writing logs to %r', debug_path) interp = posix.environ.get('OSH_HIJACK_SHEBANG', '') exec_deps.search_path = state.SearchPath(mem) exec_deps.ext_prog = process.ExternalProgram(interp, fd_state, exec_deps.search_path, errfmt, debug_f) splitter = split.SplitContext(mem) exec_deps.splitter = splitter # split() builtin builtin_funcs.SetGlobalFunc(mem, 'split', lambda s: splitter.SplitForWordEval(s)) # This could just be OSH_DEBUG_STREAMS='debug crash' ? That might be # stuffing too much into one, since a .json crash dump isn't a stream. 
crash_dump_dir = posix.environ.get('OSH_CRASH_DUMP_DIR', '') exec_deps.dumper = dev.CrashDumper(crash_dump_dir) if opts.xtrace_to_debug_file: trace_f = debug_f else: trace_f = util.DebugFile(sys.stderr) exec_deps.trace_f = trace_f comp_lookup = completion.Lookup() # Various Global State objects to work around readline interfaces compopt_state = completion.OptionState() comp_ui_state = comp_ui.State() prompt_state = comp_ui.PromptState() dir_stack = state.DirStack() new_var = builtin_assign.NewVar(mem, procs, errfmt) builtins = { # Lookup builtin_e.ECHO: builtin_pure.Echo(exec_opts), builtin_e.PRINTF: builtin_printf.Printf(mem, parse_ctx, errfmt), builtin_e.PUSHD: builtin.Pushd(mem, dir_stack, errfmt), builtin_e.POPD: builtin.Popd(mem, dir_stack, errfmt), builtin_e.DIRS: builtin.Dirs(mem, dir_stack, errfmt), builtin_e.PWD: builtin.Pwd(mem, errfmt), builtin_e.READ: builtin.Read(splitter, mem), builtin_e.HELP: builtin.Help(loader, errfmt), builtin_e.HISTORY: builtin.History(line_input), # Completion (more added below) builtin_e.COMPOPT: builtin_comp.CompOpt(compopt_state, errfmt), builtin_e.COMPADJUST: builtin_comp.CompAdjust(mem), # test / [ differ by need_right_bracket builtin_e.TEST: builtin_bracket.Test(False, errfmt), builtin_e.BRACKET: builtin_bracket.Test(True, errfmt), # Assignment (which are pure) builtin_e.DECLARE: new_var, builtin_e.TYPESET: new_var, builtin_e.LOCAL: new_var, builtin_e.EXPORT: builtin_assign.Export(mem, errfmt), builtin_e.READONLY: builtin_assign.Readonly(mem, errfmt), builtin_e.UNSET: builtin_assign.Unset(mem, procs, errfmt), builtin_e.SHIFT: builtin_assign.Shift(mem), # Pure builtin_e.SET: builtin_pure.Set(exec_opts, mem), builtin_e.SHOPT: builtin_pure.Shopt(exec_opts), builtin_e.ALIAS: builtin_pure.Alias(aliases, errfmt), builtin_e.UNALIAS: builtin_pure.UnAlias(aliases, errfmt), builtin_e.TYPE: builtin_pure.Type(procs, aliases, exec_deps.search_path), builtin_e.HASH: builtin_pure.Hash(exec_deps.search_path), builtin_e.GETOPTS: 
builtin_pure.GetOpts(mem, errfmt), builtin_e.COLON: lambda arg_vec: 0, # a "special" builtin builtin_e.TRUE: lambda arg_vec: 0, builtin_e.FALSE: lambda arg_vec: 1, # Process builtin_e.WAIT: builtin_process.Wait(exec_deps.waiter, exec_deps.job_state, mem, errfmt), builtin_e.JOBS: builtin_process.Jobs(exec_deps.job_state), builtin_e.FG: builtin_process.Fg(exec_deps.job_state, exec_deps.waiter), builtin_e.BG: builtin_process.Bg(exec_deps.job_state), builtin_e.UMASK: builtin_process.Umask, # Oil builtin_e.REPR: builtin_oil.Repr(mem, errfmt), builtin_e.PUSH: builtin_oil.Push(mem, errfmt), builtin_e.USE: builtin_oil.Use(mem, errfmt), } ex = cmd_exec.Executor(mem, fd_state, procs, builtins, exec_opts, parse_ctx, exec_deps) exec_deps.ex = ex word_ev = word_eval.NormalWordEvaluator(mem, exec_opts, exec_deps, arena) exec_deps.word_ev = word_ev arith_ev = osh_expr_eval.ArithEvaluator(mem, exec_opts, word_ev, errfmt) exec_deps.arith_ev = arith_ev word_ev.arith_ev = arith_ev # Another circular dependency bool_ev = osh_expr_eval.BoolEvaluator(mem, exec_opts, word_ev, errfmt) exec_deps.bool_ev = bool_ev expr_ev = expr_eval.OilEvaluator(mem, procs, ex, word_ev, errfmt) exec_deps.expr_ev = expr_ev tracer = dev.Tracer(parse_ctx, exec_opts, mem, word_ev, trace_f) exec_deps.tracer = tracer # HACK for circular deps ex.word_ev = word_ev ex.arith_ev = arith_ev ex.bool_ev = bool_ev ex.expr_ev = expr_ev ex.tracer = tracer word_ev.expr_ev = expr_ev spec_builder = builtin_comp.SpecBuilder(ex, parse_ctx, word_ev, splitter, comp_lookup) # Add some builtins that depend on the executor! 
complete_builtin = builtin_comp.Complete(spec_builder, comp_lookup) builtins[builtin_e.COMPLETE] = complete_builtin builtins[builtin_e.COMPGEN] = builtin_comp.CompGen(spec_builder) builtins[builtin_e.CD] = builtin.Cd(mem, dir_stack, ex, errfmt) builtins[builtin_e.JSON] = builtin_oil.Json(mem, ex, errfmt) sig_state = process.SignalState() sig_state.InitShell() builtins[builtin_e.TRAP] = builtin_process.Trap(sig_state, exec_deps.traps, exec_deps.trap_nodes, ex, errfmt) if lang == 'oil': # The Oil executor wraps an OSH executor? It needs to be able to source # it. ex = oil_cmd_exec.OilExecutor(ex) # PromptEvaluator rendering is needed in non-interactive shells for @P. prompt_ev = prompt.Evaluator(lang, parse_ctx, ex, mem) exec_deps.prompt_ev = prompt_ev word_ev.prompt_ev = prompt_ev # HACK for circular deps # History evaluation is a no-op if line_input is None. hist_ev = history.Evaluator(line_input, hist_ctx, debug_f) if opts.c is not None: arena.PushSource(source.CFlag()) line_reader = reader.StringLineReader(opts.c, arena) if opts.i: # -c and -i can be combined exec_opts.interactive = True elif opts.i: # force interactive arena.PushSource(source.Stdin(' -i')) line_reader = reader.InteractiveLineReader(arena, prompt_ev, hist_ev, line_input, prompt_state, sig_state) exec_opts.interactive = True else: try: script_name = arg_r.Peek() except IndexError: if sys.stdin.isatty(): arena.PushSource(source.Interactive()) line_reader = reader.InteractiveLineReader( arena, prompt_ev, hist_ev, line_input, prompt_state, sig_state) exec_opts.interactive = True else: arena.PushSource(source.Stdin('')) line_reader = reader.FileLineReader(sys.stdin, arena) else: arena.PushSource(source.MainFile(script_name)) try: f = fd_state.Open(script_name) except OSError as e: ui.Stderr("osh: Couldn't open %r: %s", script_name, posix.strerror(e.errno)) return 1 line_reader = reader.FileLineReader(f, arena) # TODO: assert arena.NumSourcePaths() == 1 # TODO: .rc file needs its own arena. 
if lang == 'osh': c_parser = parse_ctx.MakeOshParser(line_reader) else: c_parser = parse_ctx.MakeOilParser(line_reader) if exec_opts.interactive: # Calculate ~/.config/oil/oshrc or oilrc # Use ~/.config/oil to avoid cluttering the user's home directory. Some # users may want to ln -s ~/.config/oil/oshrc ~/oshrc or ~/.oshrc. # https://unix.stackexchange.com/questions/24347/why-do-some-applications-use-config-appname-for-their-config-data-while-other home_dir = process.GetHomeDir() assert home_dir is not None rc_path = opts.rcfile or os_path.join(home_dir, '.config/oil', lang + 'rc') history_filename = os_path.join(home_dir, '.config/oil', 'history_' + lang) if line_input: # NOTE: We're using a different WordEvaluator here. ev = word_eval.CompletionWordEvaluator(mem, exec_opts, exec_deps, arena) root_comp = completion.RootCompleter(ev, mem, comp_lookup, compopt_state, comp_ui_state, comp_ctx, debug_f) term_width = 0 if opts.completion_display == 'nice': try: term_width = libc.get_terminal_width() except IOError: # stdin not a terminal pass if term_width != 0: display = comp_ui.NiceDisplay(term_width, comp_ui_state, prompt_state, debug_f, line_input) else: display = comp_ui.MinimalDisplay(comp_ui_state, prompt_state, debug_f) _InitReadline(line_input, history_filename, root_comp, display, debug_f) _InitDefaultCompletions(ex, complete_builtin, comp_lookup) else: # Without readline module display = comp_ui.MinimalDisplay(comp_ui_state, prompt_state, debug_f) sig_state.InitInteractiveShell(display) # NOTE: Call this AFTER _InitDefaultCompletions. 
try: SourceStartupFile(rc_path, lang, parse_ctx, ex) except util.UserExit as e: return e.status line_reader.Reset() # After sourcing startup file, render $PS1 prompt_plugin = prompt.UserPlugin(mem, parse_ctx, ex) try: status = main_loop.Interactive(opts, ex, c_parser, display, prompt_plugin, errfmt) if ex.MaybeRunExitTrap(): status = ex.LastStatus() except util.UserExit as e: status = e.status return status nodes_out = [] if exec_opts.noexec else None if nodes_out is None and opts.parser_mem_dump: raise args.UsageError('--parser-mem-dump can only be used with -n') _tlog('Execute(node)') try: status = main_loop.Batch(ex, c_parser, arena, nodes_out=nodes_out) if ex.MaybeRunExitTrap(): status = ex.LastStatus() except util.UserExit as e: status = e.status # Only print nodes if the whole parse succeeded. if nodes_out is not None and status == 0: if opts.parser_mem_dump: # only valid in -n mode # This might be superstition, but we want to let the value stabilize # after parsing. bash -c 'cat /proc/$$/status' gives different results # with a sleep. time.sleep(0.001) input_path = '/proc/%d/status' % posix.getpid() with open(input_path) as f, open(opts.parser_mem_dump, 'w') as f2: contents = f.read() f2.write(contents) log('Wrote %s to %s (--parser-mem-dump)', input_path, opts.parser_mem_dump) ui.PrintAst(nodes_out, opts) # NOTE: 'exit 1' is ControlFlow and gets here, but subshell/commandsub # don't because they call sys.exit(). if opts.runtime_mem_dump: # This might be superstition, but we want to let the value stabilize # after parsing. bash -c 'cat /proc/$$/status' gives different results # with a sleep. time.sleep(0.001) input_path = '/proc/%d/status' % posix.getpid() with open(input_path) as f, open(opts.runtime_mem_dump, 'w') as f2: contents = f.read() f2.write(contents) log('Wrote %s to %s (--runtime-mem-dump)', input_path, opts.runtime_mem_dump) # NOTE: We haven't closed the file opened with fd_state.Open return status
from osh import builtin
from osh import state

# Module-level aliases so the tests can name these classes without the
# `process.` prefix.
Process = process.Process
ExternalThunk = process.ExternalThunk


def Banner(msg):
  """Print a 60-dash separator line followed by msg, to delimit test output."""
  print('-' * 60)
  print(msg)


# TODO: Put these all in a function.
# Shared shell-runtime fixtures, constructed once at import time.
# NOTE(review): construction order matters — each object is passed to the
# constructors below it (_ARENA -> _MEM -> _EXEC_OPTS -> _WAITER, etc.),
# so do not reorder these statements.
_ARENA = test_lib.MakeArena('process_test.py')
_MEM = state.Mem('', [], {}, _ARENA)
_PARSE_OPTS = parse_lib.OilParseOptions()
_EXEC_OPTS = state.ExecOpts(_MEM, _PARSE_OPTS, None)
_JOB_STATE = process.JobState()
_WAITER = process.Waiter(_JOB_STATE, _EXEC_OPTS)
_ERRFMT = ui.ErrorFormatter(_ARENA)
_FD_STATE = process.FdState(_ERRFMT, _JOB_STATE)
_SEARCH_PATH = state.SearchPath(_MEM)
# ExternalProgram gets a NullDebugFile so test runs produce no trace output.
_EXT_PROG = process.ExternalProgram(False, _FD_STATE, _SEARCH_PATH, _ERRFMT,
                                    util.NullDebugFile())


def _CommandNode(code_str, arena):
  """Parse code_str with a fresh command parser and return the node for its
  first logical line.

  Args:
    code_str: shell source to parse.
    arena: alloc.Arena the parser should record into.
  """
  c_parser = test_lib.InitCommandParser(code_str, arena=arena)
  return c_parser.ParseLogicalLine()