def addskip(skips, skip):
    """Record a line skip in *skips* and return the (mutated) list.

    A skip must be a positive line number; anything else is reported via
    complain() and ignored rather than recorded.
    """
    if skip >= 1:
        skips.append(skip)
    else:
        # invalid skips are an internal error, but non-fatal: report and drop
        complain(CoconutInternalException("invalid skip of line " + str(skip)))
    return skips
def find_new_value(value, toklist, new_toklist):
    """Find the value in new_toklist that corresponds to the given value in toklist."""
    if isinstance(value, ParseResults):
        # a ParseResults wrapping the toklist itself maps to the new toklist wholesale
        if value._ParseResults__toklist == toklist:
            return ParseResults(new_toklist)
        # otherwise translate each of its inner tokens individually
        return ParseResults([
            find_new_value(inner_value, toklist, new_toklist)
            for inner_value in value._ParseResults__toklist
        ])
    # non-ParseResults objects are located by position in the old toklist
    try:
        return new_toklist[toklist.index(value)]
    except ValueError:
        # not found: fall back to reevaluating it directly (slow path)
        complain(
            lambda: CoconutInternalException(
                "inefficient reevaluation of tokens: {} not in {}".format(
                    value,
                    toklist,
                ),
            ),
        )
        return evaluate_tokens(value)
def evaluate_tokens(tokens):
    """Evaluate the given tokens in the computation graph.

    Strings are returned unchanged, ComputationNodes are evaluated,
    lists/tuples are evaluated elementwise, and ParseResults have both
    their list portion and their dictionary portion evaluated.
    """
    if isinstance(tokens, str):
        return tokens
    elif isinstance(tokens, ParseResults):

        # evaluate the list portion of the ParseResults
        toklist, name, asList, modal = tokens.__getnewargs__()
        new_toklist = [evaluate_tokens(toks) for toks in toklist]
        new_tokens = ParseResults(new_toklist, name, asList, modal)

        # evaluate the dictionary portion of the ParseResults
        # (loop variable renamed from "name" to avoid shadowing the name above)
        new_tokdict = {}
        for tok_name, occurrences in tokens._ParseResults__tokdict.items():
            new_occurrences = []
            for value, position in occurrences:
                if isinstance(value, ParseResults) and value._ParseResults__toklist == toklist:
                    # the value is the whole toklist, so reuse the evaluated result
                    new_value = new_tokens
                else:
                    try:
                        # look up the evaluated value by the old value's position
                        new_value = new_toklist[toklist.index(value)]
                    except ValueError:
                        complain(lambda: CoconutInternalException("inefficient reevaluation of tokens: {} not in {}".format(
                            value,
                            toklist,
                        )))
                        new_value = evaluate_tokens(value)
                new_occurrences.append(_ParseResultsWithOffset(new_value, position))
            # bug fix: store the evaluated occurrences, not the original
            #  (unevaluated) ones, which previously left the dictionary
            #  portion holding raw computation-graph nodes
            new_tokdict[tok_name] = new_occurrences
        new_tokens._ParseResults__accumNames.update(tokens._ParseResults__accumNames)
        new_tokens._ParseResults__tokdict.update(new_tokdict)
        return new_tokens

    elif isinstance(tokens, ComputationNode):
        return tokens.evaluate()
    elif isinstance(tokens, (list, tuple)):
        return [evaluate_tokens(inner_toks) for inner_toks in tokens]
    else:
        raise CoconutInternalException("invalid computation graph tokens", tokens)
def evaluate_tokens(tokens):
    """Evaluate the given tokens in the computation graph.

    Strings are returned unchanged, ComputationNodes are evaluated,
    lists/tuples are evaluated elementwise, and ParseResults have both
    their list portion and their dictionary portion evaluated.
    """
    if isinstance(tokens, str):
        return tokens
    elif isinstance(tokens, ParseResults):

        # evaluate the list portion of the ParseResults
        toklist, name, asList, modal = tokens.__getnewargs__()
        new_toklist = [evaluate_tokens(toks) for toks in toklist]
        new_tokens = ParseResults(new_toklist, name, asList, modal)

        # evaluate the dictionary portion of the ParseResults
        # (loop variable renamed from "name" to avoid shadowing the name above)
        new_tokdict = {}
        for tok_name, occurrences in tokens._ParseResults__tokdict.items():
            new_occurrences = []
            for value, position in occurrences:
                if isinstance(value, ParseResults) and value._ParseResults__toklist == toklist:
                    # the value is the whole toklist, so reuse the evaluated result
                    new_value = new_tokens
                else:
                    try:
                        # look up the evaluated value by the old value's position
                        new_value = new_toklist[toklist.index(value)]
                    except ValueError:
                        complain(lambda: CoconutInternalException("inefficient reevaluation of tokens: {} not in {}".format(
                            value,
                            toklist,
                        )))
                        new_value = evaluate_tokens(value)
                new_occurrences.append(_ParseResultsWithOffset(new_value, position))
            # bug fix: store the evaluated occurrences, not the original
            #  (unevaluated) ones, which previously left the dictionary
            #  portion holding raw computation-graph nodes
            new_tokdict[tok_name] = new_occurrences
        new_tokens._ParseResults__accumNames.update(tokens._ParseResults__accumNames)
        new_tokens._ParseResults__tokdict.update(new_tokdict)
        return new_tokens

    elif isinstance(tokens, ComputationNode):
        return tokens.evaluate()
    elif isinstance(tokens, (list, tuple)):
        return [evaluate_tokens(inner_toks) for inner_toks in tokens]
    else:
        raise CoconutInternalException("invalid computation graph tokens", tokens)
from prompt_toolkit.lexers.pygments import PygmentsLexer from prompt_toolkit.styles.pygments import style_from_pygments_cls except ImportError: # prompt_toolkit v1 from prompt_toolkit.layout.lexers import PygmentsLexer from prompt_toolkit.styles import style_from_pygments as style_from_pygments_cls import pygments import pygments.styles from coconut.highlighter import CoconutLexer except ImportError: prompt_toolkit = None except KeyError: complain( ImportError( "detected outdated pygments version (run 'pip install --upgrade pygments' to fix)", ), ) prompt_toolkit = None # ----------------------------------------------------------------------------------------------------------------------- # FUNCTIONS: # ----------------------------------------------------------------------------------------------------------------------- def openfile(filename, opentype="r+"): """Open a file using default_encoding.""" return open(filename, opentype, encoding=default_encoding) # using open from coconut.root def writefile(openedfile, newcontents):
from prompt_toolkit.lexers.pygments import PygmentsLexer from prompt_toolkit.styles.pygments import style_from_pygments_cls except ImportError: # prompt_toolkit v1 from prompt_toolkit.layout.lexers import PygmentsLexer from prompt_toolkit.styles import style_from_pygments as style_from_pygments_cls import pygments import pygments.styles from coconut.highlighter import CoconutLexer except ImportError: prompt_toolkit = None except KeyError: complain(ImportError( "detected outdated pygments version (run 'pip install --upgrade pygments' to fix)", )) prompt_toolkit = None # ----------------------------------------------------------------------------------------------------------------------- # FUNCTIONS: # ----------------------------------------------------------------------------------------------------------------------- def openfile(filename, opentype="r+"): """Open a file using default_encoding.""" return open(filename, opentype, encoding=default_encoding) # using open from coconut.root def writefile(openedfile, newcontents): """Set the contents of a file."""
def evaluate_tokens(tokens, **kwargs):
    """Evaluate the given tokens in the computation graph.

    Accepts an optional ``evaluated_toklists`` keyword argument: a tuple of
    (old_toklist, new_toklist) pairs acting as a cache of already-evaluated
    toklists, so shared tokens are looked up instead of reevaluated.
    """
    # can't have this be a normal kwarg to make evaluate_tokens a valid parse action
    evaluated_toklists = kwargs.pop("evaluated_toklists", ())
    internal_assert(not kwargs, "invalid keyword arguments to evaluate_tokens", kwargs)

    if isinstance(tokens, ParseResults):

        # evaluate the list portion of the ParseResults
        old_toklist, name, asList, modal = tokens.__getnewargs__()
        # check the cache first: if this toklist was already evaluated, reuse it
        new_toklist = None
        for eval_old_toklist, eval_new_toklist in evaluated_toklists:
            if old_toklist == eval_old_toklist:
                new_toklist = eval_new_toklist
                break
        if new_toklist is None:
            new_toklist = [evaluate_tokens(toks, evaluated_toklists=evaluated_toklists) for toks in old_toklist]
            # overwrite evaluated toklists rather than appending, since this
            # should be all the information we need for evaluating the dictionary
            evaluated_toklists = ((old_toklist, new_toklist),)
        # NOTE(review): relies on pyparsing ParseResults internals
        #  (__accumNames, __tokdict) — verify against the pinned pyparsing version
        new_tokens = ParseResults(new_toklist, name, asList, modal)
        new_tokens._ParseResults__accumNames.update(tokens._ParseResults__accumNames)

        # evaluate the dictionary portion of the ParseResults
        new_tokdict = {}
        for name, occurrences in tokens._ParseResults__tokdict.items():
            new_occurrences = []
            for value, position in occurrences:
                new_value = evaluate_tokens(value, evaluated_toklists=evaluated_toklists)
                new_occurrences.append(_ParseResultsWithOffset(new_value, position))
            new_tokdict[name] = new_occurrences
        new_tokens._ParseResults__tokdict.update(new_tokdict)

        return new_tokens

    else:

        # for non-ParseResults tokens, first try to find them in the cache
        #  of already-evaluated toklists before reevaluating them
        if evaluated_toklists:
            for eval_old_toklist, eval_new_toklist in evaluated_toklists:
                indices = multi_index_lookup(eval_old_toklist, tokens, indexable_types=indexable_evaluated_tokens_types)
                if indices is not None:
                    # walk the (possibly nested) index path into the evaluated toklist
                    new_tokens = eval_new_toklist
                    for ind in indices:
                        new_tokens = new_tokens[ind]
                    return new_tokens
            # cache miss: reevaluation below still works, but is inefficient
            complain(
                lambda: CoconutInternalException(
                    "inefficient reevaluation of tokens: {tokens} not in:\n{toklists}".format(
                        tokens=tokens,
                        toklists=pformat([eval_old_toklist for eval_old_toklist, eval_new_toklist in evaluated_toklists]),
                    ),
                ),
            )

        if isinstance(tokens, str):
            return tokens
        elif isinstance(tokens, ComputationNode):
            return tokens.evaluate()
        elif isinstance(tokens, list):
            return [evaluate_tokens(inner_toks, evaluated_toklists=evaluated_toklists) for inner_toks in tokens]
        elif isinstance(tokens, tuple):
            return tuple(evaluate_tokens(inner_toks, evaluated_toklists=evaluated_toklists) for inner_toks in tokens)
        else:
            raise CoconutInternalException("invalid computation graph tokens", tokens)
from prompt_toolkit.lexers.pygments import PygmentsLexer from prompt_toolkit.styles.pygments import style_from_pygments_cls except ImportError: # prompt_toolkit v1 from prompt_toolkit.layout.lexers import PygmentsLexer from prompt_toolkit.styles import style_from_pygments as style_from_pygments_cls import pygments import pygments.styles from coconut.highlighter import CoconutLexer except ImportError: prompt_toolkit = None except KeyError: complain( ImportError( "detected outdated pygments version (run '{python} -m pip install --upgrade pygments' to fix)" .format(python=sys.executable), ), ) prompt_toolkit = None # ----------------------------------------------------------------------------------------------------------------------- # UTILITIES: # ----------------------------------------------------------------------------------------------------------------------- def writefile(openedfile, newcontents): """Set the contents of a file.""" openedfile.seek(0) openedfile.truncate() openedfile.write(newcontents)