def collect_grammar(s, scope: list):
    """Reset the parser `s` and (re)build its grammar from `scope`.

    Registers the built-in symbols, then adds one `start -> <node>` rule per
    scope item whose grammar symbol is defined.

    s     -- the parser/grammar object being populated (mutated in place)
    scope -- list of node definitions; must already be deduplicated
    """
    # NOTE(review): original source had all statements collapsed onto one
    # line; structure reconstructed from the later revisions of this function.
    assert scope == uniq(scope)
    s.clear()
    s.scope = scope
    s.named_symbol('start')
    #s.set_start_symbol(s.syms.start)
    s.named_symbol('nonspecial_char')
    s.named_symbol('known_char')
    s.named_symbol('maybe_spaces')
    # zero-or-more spaces, dropped from the parse result
    s.sequence('maybe_spaces', s.syms.maybe_spaces, s.known_char(' '), action=ignore, min=0)
    for i in scope:
        # the property is accessed here, forcing the registering of the nodes grammars
        sym = i.symbol
        if sym is not None:
            if args.log_parsing:
                log(sym)
                rulename = 'start is %s' % s.symbol2debug_name(sym)
            else:
                rulename = ""
            s.rule(rulename, s.syms.start, sym)
def collect_grammar(s, scope: list, start=None):
    """Reset the parser `s` and (re)build its grammar from `scope`.

    s     -- the parser/grammar object being populated (mutated in place)
    scope -- list of node definitions; must already be deduplicated
    start -- optional node to use as the start symbol; when None ("anything"
             mode) a fresh 'start' symbol is created and every scope item
             gets a `start -> <node>` rule
    """
    # NOTE(review): original source had all statements collapsed onto one
    # line; structure reconstructed from the adjacent revisions.
    assert scope == uniq(scope)
    s.clear()
    log("grammar clear")
    # we dont have a separate tokenizer
    s.named_symbol("nonspecial_char")
    s.named_symbol("known_char")
    # maybe just convenience
    s.named_symbol("maybe_spaces")
    # zero-or-more spaces, dropped from the parse result
    s.sequence("maybe_spaces", s.syms.maybe_spaces, s.known_char(" "), action=ignore, min=0)
    s.scope = scope
    anything = start is None
    if anything:
        s.start = s.named_symbol("start")
    else:
        s.start = start.symbol
    log("start=", s.start)
    for i in scope:
        # the property is accessed here, forcing the registering of the nodes grammars
        sym = i.symbol
    if anything:
        for i in scope:
            sym = i.symbol  # cache the property access; it is used up to three times below
            if sym is not None:
                if args.log_parsing:
                    log(sym)
                    rulename = "start is %s" % s.symbol2debug_name(sym)
                else:
                    rulename = ""
                s.rule(rulename, s.start, sym)
    # hmm how is this gonna mesh out with the "anything" rules and with autocompletion rules?
    # ok here we're gonna walk thru WorkAssess and BindsTighters and do the precedence and associativity magic
    # NOTE(review): the original ended with a stray unterminated '"""' --
    # dropped here as an extraction artifact (it would have swallowed the
    # rest of the file as a string literal).
def collect_grammar(s, full_scope: list, scope: list, start=None):
    """Reset the parser `s` and (re)build its grammar.

    Registers the built-in character/whitespace symbols, pulls in any
    "WorksAs" coercion nodes from `full_scope` that are relevant to `scope`,
    forces registration of each scope item's grammar, and finally applies
    the precedence/coercion passes (`worksas_magic`, `anything_excepts`).

    s          -- the parser/grammar object being populated (mutated in place)
    full_scope -- the complete set of definitions to scan for WorksAs nodes
    scope      -- the definitions actually in scope; must be deduplicated
    start      -- optional start node; None means "anything" mode (editor
                  parsing of arbitrary fragments): a fresh 'start' symbol is
                  created with one rule per scope item
    """
    # Work on copies: `scope` is appended to below and `s.scope` would
    # otherwise alias the caller's list.
    full_scope = full_scope[:]
    scope = scope[:]
    assert scope == uniq(scope), (scope, uniq(scope))
    s.clear()
    log("collect_grammar:...")
    # we dont have a separate tokenizer
    # any char used in any terminal of the grammar:
    s.named_symbol('known_char')
    # all the rest:
    s.named_symbol('nonspecial_char')
    # these should eventually be defined in lemon lang
    s.named_symbol('maybe_spaces')
    s.sequence('maybe_spaces', s.syms.maybe_spaces, s.known_char(' '), action=valuator_actions.ignore, min=0)
    s.named_symbol('whitespace_char')
    s.named_symbol('maybe_whitespace')
    for x in ' \n\t':
        s.rule("whitespace_char", s.syms.whitespace_char, s.known_char(x))
    s.sequence('maybe_whitespace', s.syms.maybe_whitespace, s.syms.whitespace_char, action=valuator_actions.ignore, min=0)
    s.scope = scope
    # "anything" means we are parsing for the editor and want to parse any fragment of the language
    anything = start is None
    if anything:
        s.start = s.named_symbol('start')
    else:
        s.start = start.symbol(s)
    if s.debug:
        log("start=%s", s.symbol2debug_name(s.start))
    import nodes

    def is_relevant_for(worksas, relevant_scope):
        # A WorksAs node matters only when both its sub and sup definitions
        # are in scope.  (Parameters renamed so they don't shadow the outer
        # `s`/`scope`.)
        sub = worksas.ch.sub.parsed
        sup = worksas.ch.sup.parsed
        aa = nodes.deref_def(sub)
        bb = nodes.deref_def(sup)
        a = aa in relevant_scope
        b = bb in relevant_scope
        logging.getLogger("scope").debug("%s is_relevant_for scope? %s %s (%s, %s)", worksas.tostr(), a, b, aa, bb)
        return a and b

    # Pull relevant WorksAs coercions from the full scope into the active one.
    # NOTE(review): name-based type check kept as-is -- presumably avoids an
    # import cycle with the WorksAs class; confirm before replacing with
    # isinstance().
    for i in full_scope:
        if type(i).__name__ == "WorksAs":
            if is_relevant_for(i, scope):
                if i not in scope:
                    scope.append(i)
    for i in scope:
        # symbol() is called here for its side effect: it forces the
        # registering of the node's grammar
        i.symbol(s)
    if anything:
        for i in scope:
            sym = i.symbol(s)  # cache: the original re-evaluated this up to four times
            if sym is not None:
                if args.log_parsing:
                    if s.debug:
                        log(sym)
                    rulename = 'start is %s' % s.symbol2debug_name(sym)
                else:
                    rulename = ""
                s.rule(rulename, s.start, sym)
    s.worksas_magic(scope)
    s.anything_excepts()