def MakeTestEvaluator():
  """Build a CompletionWordEvaluator wired to fresh, empty test state."""
  test_arena = alloc.SideArena('<MakeTestEvaluator>')
  memory = state.Mem('', [], {}, test_arena)
  opts = state.ExecOpts(memory, None)
  split_ctx = legacy.SplitContext(memory)
  return word_eval.CompletionWordEvaluator(memory, opts, split_ctx, test_arena)
def testCompletesAliases(self):
  """Completion should follow alias expansion back to the underlying command."""
  # I put some aliases in this file.
  with open('testdata/completion/osh-unit.bash') as f:
    code_str = f.read()
  trail = parse_lib.Trail()
  arena = alloc.SideArena('<completion_test.py>')
  parse_ctx = parse_lib.ParseContext(arena, {}, trail=trail)
  comp_lookup = completion.Lookup()
  ex = test_lib.EvalCode(code_str, parse_ctx, comp_lookup=comp_lookup)

  r = _MakeRootCompleter(parse_ctx=parse_ctx, comp_lookup=comp_lookup)

  # The original command
  m = list(r.Matches(MockApi('ls ')))
  self.assertEqual(['ls one ', 'ls two '], sorted(m))

  # Alias for the command
  m = list(r.Matches(MockApi('ll ')))
  self.assertEqual(['ll one ', 'll two '], sorted(m))

  # DOUBLE alias expansion goes back to original
  m = list(r.Matches(MockApi('ll_classify ')))
  self.assertEqual(['ll_classify one ', 'll_classify two '], sorted(m))

  # Trailing space
  m = list(r.Matches(MockApi('ll_trailing ')))
  self.assertEqual(['ll_trailing one ', 'll_trailing two '], sorted(m))

  # It should NOT clobber completion registered for aliases
  m = list(r.Matches(MockApi('ll_own_completion ')))
  self.assertEqual(
      ['ll_own_completion own ', 'll_own_completion words '], sorted(m))
def MakeTestEvaluator():
  """Construct a CompletionWordEvaluator backed by fresh interpreter state."""
  test_arena = alloc.SideArena('<MakeTestEvaluator>')
  memory = state.Mem('', [], {}, test_arena)
  opts = state.ExecOpts(memory, None)

  deps = cmd_exec.Deps()
  deps.splitter = split.SplitContext(memory)

  return word_eval.CompletionWordEvaluator(memory, opts, deps, test_arena)
def testRunsUserDefinedFunctions(self):
  """Exercise user-defined completion functions from osh-unit.bash.

  Covers -o nospace, -X filtering, -P prefixes, and -o plusdirs/dirnames
  interactions.
  """
  # This is here because it's hard to test readline with the spec tests.
  with open('testdata/completion/osh-unit.bash') as f:
    code_str = f.read()
  trail = parse_lib.Trail()
  arena = alloc.SideArena('<completion_test.py>')
  parse_ctx = parse_lib.ParseContext(arena, {}, trail=trail)
  comp_lookup = completion.Lookup()
  ex = test_lib.EvalCode(code_str, parse_ctx, comp_lookup=comp_lookup)
  r = _MakeRootCompleter(comp_lookup=comp_lookup)

  # By default, we get a space on the end.
  m = list(r.Matches(MockApi('mywords t')))
  self.assertEqual(['mywords three ', 'mywords two '], sorted(m))

  # No space
  m = list(r.Matches(MockApi('mywords_nospace t')))
  self.assertEqual(['mywords_nospace three', 'mywords_nospace two'],
                   sorted(m))

  # Filtered out two and bin
  m = list(r.Matches(MockApi('flagX ')))
  self.assertEqual(['flagX one ', 'flagX three '], sorted(m))

  # Filter out everything EXCEPT two and bin
  m = list(r.Matches(MockApi('flagX_bang ')))
  self.assertEqual(['flagX_bang bin ', 'flagX_bang two '], sorted(m))

  # -X with -P
  m = list(r.Matches(MockApi('flagX_prefix ')))
  self.assertEqual(['flagX_prefix __one ', 'flagX_prefix __three '],
                   sorted(m))

  # -P with plusdirs
  m = list(r.Matches(MockApi('prefix_plusdirs b')))
  self.assertEqual([
      'prefix_plusdirs __bin ', 'prefix_plusdirs benchmarks/',
      'prefix_plusdirs bin/', 'prefix_plusdirs build/'
  ], sorted(m))

  # -X with plusdirs.  We're filtering out bin/, and then it's added back by
  # plusdirs.  The filter doesn't kill it.
  m = list(r.Matches(MockApi('flagX_plusdirs b')))
  self.assertEqual([
      'flagX_plusdirs benchmarks/', 'flagX_plusdirs bin/',
      'flagX_plusdirs build/'
  ], sorted(m))

  # -P with dirnames.  -P is NOT respected.
  m = list(r.Matches(MockApi('prefix_dirnames b')))
  self.assertEqual([
      'prefix_dirnames benchmarks/', 'prefix_dirnames bin/',
      'prefix_dirnames build/'
  ], sorted(m))
def __init__(self, parse_ctx, exec_opts, mem, word_ev, f):
  """
  Args:
    parse_ctx: For parsing PS4.
    exec_opts: For xtrace setting
    mem: for retrieving PS4
    word_ev: for evaluating PS4
    f: file-like object the trace is written to (can be the --debug-file)
  """
  self.parse_ctx = parse_ctx
  self.exec_opts = exec_opts
  self.mem = mem
  self.word_ev = word_ev
  self.f = f  # can be the --debug-file as well
  # Dedicated arena for parsing PS4, so trace parsing doesn't pollute the
  # main program's arena.
  self.arena = alloc.SideArena('<$PS4>')
  self.parse_cache = {}  # PS4 value -> CompoundWord.  PS4 is scoped.
def __init__(self, parse_ctx, exec_opts, mem, word_ev, f):
  """
  Args:
    parse_ctx: For parsing PS4.
    exec_opts: For xtrace setting
    mem: for retrieving PS4
    word_ev: for evaluating PS4
    f: file-like object the trace is written to (can be the --debug-file)
  """
  self.parse_ctx = parse_ctx
  self.exec_opts = exec_opts
  self.mem = mem
  self.word_ev = word_ev
  self.f = f  # can be the --debug-file as well

  # NOTE: We could use the same arena, since this doesn't happen during
  # translation.
  self.arena = alloc.SideArena('<$PS4>')
  self.parse_cache = {}  # PS4 value -> CompoundWord.  PS4 is scoped.
def testNoInfiniteLoop(self):
  """Regression test: completion of return-124.bash specs must terminate."""
  # This was ONE place where we got an infinite loop.
  with open('testdata/completion/return-124.bash') as f:
    code_str = f.read()
  trail = parse_lib.Trail()
  arena = alloc.SideArena('<completion_test.py>')
  parse_ctx = parse_lib.ParseContext(arena, {}, trail=trail)
  comp_lookup = completion.Lookup()
  ex = test_lib.EvalCode(code_str, parse_ctx, comp_lookup=comp_lookup)
  r = _MakeRootCompleter(parse_ctx=parse_ctx, comp_lookup=comp_lookup)

  def Complete(line):
    # Collect and sort all matches for a given partial command line.
    return sorted(r.Matches(MockApi(line)))

  # Error: spec not changed
  self.assertEqual([], Complete('bad '))

  # Redefines completions
  self.assertEqual([], Complete('both '))

  self.assertEqual(['both2 b1 ', 'both2 b2 '], Complete('both2 '))
def _MakeAssignPair(parse_ctx, preparsed):
  """Create an assign_pair from a 4-tuple from DetectAssignment.

  Args:
    parse_ctx: ParseContext; supplies the arena for span/line lookup and
      constructs the arith parser for array indices.
    preparsed: (left_token, close_token, part_offset, w) as produced by
      DetectAssignment.

  Returns:
    A syntax_asdl.assign_pair node.
  """
  left_token, close_token, part_offset, w = preparsed

  if left_token.id == Id.Lit_VarLike:  # s=1
    # The token ends in '=', so val[-2] distinguishes 'x+=' from 'x='.
    if left_token.val[-2] == '+':
      var_name = left_token.val[:-2]
      op = assign_op_e.PlusEqual
    else:
      var_name = left_token.val[:-1]
      op = assign_op_e.Equal

    lhs = lhs_expr.LhsName(var_name)
    lhs.spids.append(left_token.span_id)

  elif left_token.id == Id.Lit_ArrayLhsOpen:  # a[x++]=1
    var_name = left_token.val[:-1]
    if close_token.val[-2] == '+':
      op = assign_op_e.PlusEqual
    else:
      op = assign_op_e.Equal

    # Recover the raw index text between '[' and ']' by concatenating the
    # arena spans, then re-parse it as an arithmetic expression.
    # Adapted from tools/osh2oil.py Cursor.PrintUntil
    # TODO: Make a method like arena.AppendPieces(start, end, []), and share
    # with alias.
    pieces = []
    for span_id in xrange(left_token.span_id + 1, close_token.span_id):
      span = parse_ctx.arena.GetLineSpan(span_id)
      line = parse_ctx.arena.GetLine(span.line_id)
      piece = line[span.col : span.col + span.length]
      pieces.append(piece)

    # Now reparse everything between here
    code_str = ''.join(pieces)

    # NOTE: It's possible that an alias expansion underlies this, not a real
    # file!  We have to use a SideArena since this will happen during
    # translation.
    line_num = 99
    source_name = 'TODO'
    arena = alloc.SideArena('<LHS array index at line %d of %s>' %
                            (line_num, source_name))

    a_parser = parse_ctx.MakeArithParser(code_str, arena)
    expr = a_parser.Parse()  # raises util.ParseError
    # TODO: It reports from the wrong arena!

    lhs = lhs_expr.LhsIndexedName(var_name, expr)
    lhs.spids.append(left_token.span_id)

  else:
    raise AssertionError

  # TODO: Should we also create a rhs_exp.ArrayLiteral here?
  n = len(w.parts)
  if part_offset == n:
    val = osh_word.EmptyWord()
  else:
    val = osh_word.CompoundWord(w.parts[part_offset:])
    val = word.TildeDetect(val) or val

  pair = syntax_asdl.assign_pair(lhs, op, val)
  pair.spids.append(left_token.span_id)  # Do we need this?
  return pair
def _InitWordParserWithArena(s):
  """Return (arena, word_parser) for parsing the source string s."""
  side_arena = alloc.SideArena('word_parse_test.py')
  ctx = parse_lib.ParseContext(side_arena, {})
  line_reader, _ = parse_lib.InitLexer(s, side_arena)
  word_parser, _ = ctx.MakeParser(line_reader)
  return side_arena, word_parser
def _InitWordParser(s, arena=None):
  """Return the word parser embedded in an OSH command parser for s."""
  arena = arena or alloc.SideArena('word_parse_test.py')
  ctx = parse_lib.ParseContext(arena, {})
  line_reader, _ = test_lib.InitLexer(s, arena)
  cmd_parser = ctx.MakeOshParser(line_reader)
  return cmd_parser.w_parser  # hack
def Matches(self, comp):
  """Classify the completion context, pick a completer chain, and yield matches.

  Generator: yields one candidate string per match, writing progress to
  self.progress_f along the way.
  """
  arena = alloc.SideArena('<completion>')

  # Two strategies:
  # 1. COMP_WORDBREAKS like bash.  set_completer_delims()
  # 2. Use the actual OSH parser.  Parse these cases:
  #   - echo
  #   - $VA
  #   - ${VA
  #   - $(echo h)
  #   - <(echo h)
  #   - >(echo h)
  #   - ``
  #   - $(( VA    # This should be a variable name
  #   - while false; do <TAB>
  #   - if <TAB>
  #   - while <TAB> -- bash gets this wrong!
  #   - command <TAB> -- bash-completion fills this in
  #   - alias completion?
  #     - alias ll='ls -l'
  #   - also var expansion?
  #     foo=ls
  #     $foo <TAB>    (even ZSH doesn't seem to handle this)
  #
  # the empty completer is consistently wrong.  Only works in the first
  # position.
  #
  # I think bash-completion is fighting with bash?
  #
  # completing aliases -- someone mentioned about zsh
  # NOTE(review): the parser-based strategy is disabled below; only
  # _GetCompletionType1 runs.
  if 0:
    w_parser, c_parser = self.parse_ctx.MakeParserForCompletion(
        comp.line, arena)
    comp_type, to_complete, comp_words = _GetCompletionType(
        w_parser, c_parser, self.ev, self.debug_f)
  else:
    comp_type, to_complete, comp_words = _GetCompletionType1(
        self.parser, comp.line)

  index = len(comp_words) - 1  # COMP_CWORD is -1 when it's empty

  # After parsing
  comp.Update(words=comp_words, index=index, to_complete=to_complete)

  # Dispatch on the detected completion context to pick a completer chain.
  if comp_type == completion_state_e.VAR_NAME:
    # Non-user chain
    chain = self.var_comp
  elif comp_type == completion_state_e.HASH_KEY:
    # Non-user chain
    # NOTE(review): 'TODO' placeholder — chain.Matches below would fail on a
    # str; presumably this state is unreachable or unimplemented.  Confirm.
    chain = 'TODO'
  elif comp_type == completion_state_e.REDIR_FILENAME:
    # Non-user chain
    chain = FileSystemAction()
  elif comp_type == completion_state_e.FIRST:
    chain = self.comp_lookup.GetFirstCompleter()
  elif comp_type == completion_state_e.REST:
    chain = self.comp_lookup.GetCompleterForName(comp_words[0])
  elif comp_type == completion_state_e.NONE:
    # Null chain?  No completion?  For example,
    # ${a:- <TAB>  -- we have no idea what to put here
    chain = 'TODO'
  else:
    raise AssertionError(comp_type)

  self.progress_f.Write('Completing %r ... (Ctrl-C to cancel)', comp.line)
  start_time = time.time()

  self.debug_f.log('Using %s', chain)

  i = 0
  for m in chain.Matches(comp):
    # TODO: need to dedupe these
    yield m
    i += 1
  elapsed = time.time() - start_time

  plural = '' if i == 1 else 'es'
  self.progress_f.Write(
      '... %d match%s for %r in %.2f seconds (Ctrl-C to cancel)', i,
      plural, comp.line, elapsed)

  # NOTE(review): elapsed/plural are recomputed and a second summary line is
  # written — looks redundant with the write above; confirm intent.
  elapsed = time.time() - start_time
  plural = '' if i == 1 else 'es'
  self.progress_f.Write('Found %d match%s for %r in %.2f seconds', i,
                        plural, comp.line, elapsed)
  done = True  # NOTE(review): never read; appears to be dead code.