Example #1
0
def exploreCFG(cfg, length_limit):
    """
    Enumerate the terminal-only strings derivable from ``cfg``, pruning any
    derivation whose intermediate expression grows past ``length_limit``.

    Because pruning is applied to intermediate expressions, some strings of
    length <= length_limit are unreachable: with S -> 1S | e and
    length_limit = 2, "11" is never produced, since reaching it requires
    passing through the over-long intermediate form 11S.
    """
    # Normalize the grammar so every production RHS is a flat tuple of symbols.
    cfg = CFG(cfg.start(), [splitProdRhs(prod) for prod in cfg.productions()])

    finished = set()  # expressions containing only terminals
    visited = set()   # expressions with nonterminals that were already expanded
    stack = [(cfg.start(),)]

    while stack:
        expr = stack.pop()
        if len(expr) > length_limit or expr in visited:
            continue

        # Find the position of the leftmost nonterminal, if there is one.
        nt_index = next(
            (pos for pos, symbol in enumerate(expr)
             if isinstance(symbol, Nonterminal)),
            None,
        )
        if nt_index is None:
            # Fully terminal: record it and move on.
            finished.add(expr)
            continue

        visited.add(expr)
        # Expand the leftmost nonterminal with every matching production.
        for prod in cfg.productions(lhs=expr[nt_index]):
            stack.append(expr[:nt_index] + prod.rhs() + expr[nt_index + 1:])

    return finished
Example #2
0
def create_taskgrammar(grammar, task, encoders):
    """
    Build a task-specific CFG from the complete grammar.

    The productions for ``task`` become the new start ('S') productions; a
    production whose RHS begins with a '*_TASK' nonterminal is expanded one
    level so 'S' points directly at that nonterminal's alternatives.  All
    productions reachable from the collected set are then copied over, except
    that an 'ENCODERS' LHS is rewritten to the encoders present in the
    dataset (or to the terminal 'E' when there are none).

    The resulting grammar is written to TASK_GRAMMAR_PATH (one production
    per line) and returned.
    """
    logger.info('Creating specific grammar for task %s' % task)
    productions = grammar.productions(Nonterminal(task))
    start_token = Nonterminal('S')
    new_productions = []

    # Promote the task's productions to start productions, inlining one level
    # of '*_TASK' nonterminals at the head of the RHS.
    for start_production in productions:
        first_token = start_production.rhs()[0]
        if is_nonterminal(first_token) and first_token.symbol().endswith('_TASK'):
            for new_start_production in grammar.productions(first_token):
                new_productions.append(Production(start_token, new_start_production.rhs()))
        else:
            new_productions.append(Production(start_token, start_production.rhs()))

    # Copy every production whose LHS is referenced by a production already
    # collected.  NOTE(review): new_productions is appended to while being
    # iterated, which makes this behave like a transitive-closure pass over
    # reachable nonterminals (Python list iteration sees newly appended
    # items).  Do not "fix" the iteration without re-checking reachability.
    for production in grammar.productions():
        for new_production in new_productions:
            if production.lhs() in new_production.rhs() and production not in new_productions:
                if production.lhs().symbol() == 'ENCODERS':  # Use encoders only for types of features in the dataset
                    # NOTE(review): the replacement Production differs from
                    # `production`, so the `not in` guard above does not stop
                    # the same ENCODERS expansion being appended more than
                    # once if multiple collected productions reference it —
                    # confirm whether duplicates are harmless downstream.
                    if len(encoders) > 0:
                        new_productions.append(Production(production.lhs(), [Nonterminal(e) for e in encoders]))
                    else:
                        new_productions.append(Production(production.lhs(), ['E']))
                else:
                    new_productions.append(production)

    task_grammar = CFG(start_token, new_productions)

    # Persist the grammar, one production per line.
    with open(TASK_GRAMMAR_PATH, 'w') as fout:
        fout.write('\n'.join([str(x) for x in task_grammar.productions()]))

    return task_grammar
Example #3
0
def create_completegrammar(primitives):
    """
    Instantiate the base grammar with the concrete primitives available.

    Every RHS token of the form 'primitive_*' whose LHS type appears in
    ``primitives`` is replaced by each available primitive of that type,
    taking the cartesian product across the RHS tokens.  The expanded
    grammar is written to COMPLETE_GRAMMAR_PATH and returned.
    """
    base_grammar = load_grammar(BASE_GRAMMAR_PATH)
    expanded_productions = []

    for production in base_grammar.productions():
        lhs_symbol = production.lhs().symbol()
        if lhs_symbol not in primitives:
            # No primitives for this type: keep the production untouched.
            expanded_productions.append(production)
            continue

        # Each RHS token maps to its list of alternatives: placeholder
        # tokens expand to all primitives of the LHS type, anything else
        # stays as a singleton.
        alternatives = [
            primitives[lhs_symbol]
            if isinstance(token, str) and token.startswith('primitive_')
            else [token]
            for token in production.rhs()
        ]
        for combination in itertools.product(*alternatives):
            expanded_productions.append(Production(production.lhs(), combination))

    complete_grammar = CFG(Nonterminal('S'), expanded_productions)

    # Persist the grammar, one production per line.
    with open(COMPLETE_GRAMMAR_PATH, 'w') as fout:
        fout.write('\n'.join(str(p) for p in complete_grammar.productions()))

    return complete_grammar
Example #4
0
File: cfg.py Project: Geolem/nltk
class CFGEditor:
    """
    A dialog window for creating and editing context free grammars.
    ``CFGEditor`` imposes the following restrictions:

    - All nonterminals must be strings consisting of word
      characters.
    - All terminals must be strings consisting of word characters
      and space characters.
    """

    # Regular expressions used by _analyze_line.  Precompile them, so
    # we can process the text faster.
    ARROW = SymbolWidget.SYMBOLS["rightarrow"]
    # LHS nonterminal followed by an arrow ('->' or the symbol-font arrow).
    _LHS_RE = re.compile(r"(^\s*\w+\s*)(->|(" + ARROW + "))")
    # Just the arrow, with surrounding whitespace.
    _ARROW_RE = re.compile(r"\s*(->|(" + ARROW + r"))\s*")
    # A complete, well-formed production line (LHS, arrow, RHS alternatives).
    _PRODUCTION_RE = re.compile(r"(^\s*\w+\s*)" + "(->|("  # LHS
                                + ARROW + r"))\s*" +
                                r"((\w+|'[\w ]*'|\"[\w ]*\"|\|)\s*)*$"  # arrow
                                )  # RHS
    # Tokenizes a production into nonterminals, arrows, and quoted terminals.
    _TOKEN_RE = re.compile("\\w+|->|'[\\w ]+'|\"[\\w ]+\"|(" + ARROW + ")")
    # Font used to highlight nonterminal tokens.
    _BOLD = ("helvetica", -12, "bold")

    def __init__(self, parent, cfg=None, set_cfg_callback=None):
        """
        Create the editor window as a child of ``parent``.

        :param cfg: the grammar to edit; a fresh empty grammar with start
            symbol 'S' is used when omitted.
        :param set_cfg_callback: if given, called with the new CFG whenever
            the user applies or resets their edits.
        """
        self._parent = parent
        if cfg is not None:
            self._cfg = cfg
        else:
            self._cfg = CFG(Nonterminal("S"), [])
        self._set_cfg_callback = set_cfg_callback

        self._highlight_matching_nonterminals = 1

        # Create the top-level window.
        self._top = Toplevel(parent)
        self._init_bindings()

        self._init_startframe()
        self._startframe.pack(side="top", fill="x", expand=0)
        self._init_prodframe()
        self._prodframe.pack(side="top", fill="both", expand=1)
        self._init_buttons()
        self._buttonframe.pack(side="bottom", fill="x", expand=0)

        self._textwidget.focus()

    def _init_startframe(self):
        """Build the top row: a label plus an entry for the start symbol."""
        frame = self._startframe = Frame(self._top)
        self._start = Entry(frame)
        self._start.pack(side="right")
        Label(frame, text="Start Symbol:").pack(side="right")
        Label(frame, text="Productions:").pack(side="left")
        self._start.insert(0, self._cfg.start().symbol())

    def _init_buttons(self):
        """Build the bottom button row (Ok/Apply/Reset/Cancel/Help)."""
        frame = self._buttonframe = Frame(self._top)
        Button(frame, text="Ok", command=self._ok, underline=0,
               takefocus=0).pack(side="left")
        Button(frame,
               text="Apply",
               command=self._apply,
               underline=0,
               takefocus=0).pack(side="left")
        Button(frame,
               text="Reset",
               command=self._reset,
               underline=0,
               takefocus=0).pack(side="left")
        Button(frame,
               text="Cancel",
               command=self._cancel,
               underline=0,
               takefocus=0).pack(side="left")
        Button(frame,
               text="Help",
               command=self._help,
               underline=0,
               takefocus=0).pack(side="right")

    def _init_bindings(self):
        """Set the window title and wire up the keyboard shortcuts."""
        self._top.title("CFG Editor")
        self._top.bind("<Control-q>", self._cancel)
        self._top.bind("<Alt-q>", self._cancel)
        self._top.bind("<Control-d>", self._cancel)
        # self._top.bind('<Control-x>', self._cancel)
        self._top.bind("<Alt-x>", self._cancel)
        self._top.bind("<Escape>", self._cancel)
        # self._top.bind('<Control-c>', self._cancel)
        self._top.bind("<Alt-c>", self._cancel)

        self._top.bind("<Control-o>", self._ok)
        self._top.bind("<Alt-o>", self._ok)
        self._top.bind("<Control-a>", self._apply)
        self._top.bind("<Alt-a>", self._apply)
        self._top.bind("<Control-r>", self._reset)
        self._top.bind("<Alt-r>", self._reset)
        self._top.bind("<Control-h>", self._help)
        self._top.bind("<Alt-h>", self._help)
        self._top.bind("<F1>", self._help)

    def _init_prodframe(self):
        """
        Build the scrollable text widget holding the productions, set up
        colorization tags and bindings, and fill it with the current
        grammar's productions (one LHS per line, alternatives joined with
        '|').
        """
        self._prodframe = Frame(self._top)

        # Create the basic Text widget & scrollbar.
        self._textwidget = Text(self._prodframe,
                                background="#e0e0e0",
                                exportselection=1)
        self._textscroll = Scrollbar(self._prodframe,
                                     takefocus=0,
                                     orient="vertical")
        self._textwidget.config(yscrollcommand=self._textscroll.set)
        self._textscroll.config(command=self._textwidget.yview)
        self._textscroll.pack(side="right", fill="y")
        self._textwidget.pack(expand=1, fill="both", side="left")

        # Initialize the colorization tags.  Each nonterminal gets its
        # own tag, so they aren't listed here.
        self._textwidget.tag_config("terminal", foreground="#006000")
        self._textwidget.tag_config("arrow", font="symbol")
        self._textwidget.tag_config("error", background="red")

        # Keep track of what line they're on.  We use that to remember
        # to re-analyze a line whenever they leave it.
        self._linenum = 0

        # Expand "->" to an arrow.
        self._top.bind(">", self._replace_arrows)

        # Re-colorize lines when appropriate.
        self._top.bind("<<Paste>>", self._analyze)
        self._top.bind("<KeyPress>", self._check_analyze)
        self._top.bind("<ButtonPress>", self._check_analyze)

        # Tab cycles focus. (why doesn't this work??)
        def cycle(e, textwidget=self._textwidget):
            textwidget.tk_focusNext().focus()

        self._textwidget.bind("<Tab>", cycle)

        # Merge adjacent productions that share an LHS into one
        # (lhs, [rhs, ...]) tuple, except when either side contains an
        # empty RHS (epsilon productions stay on their own line).
        prod_tuples = [(p.lhs(), [p.rhs()]) for p in self._cfg.productions()]
        for i in range(len(prod_tuples) - 1, 0, -1):
            if prod_tuples[i][0] == prod_tuples[i - 1][0]:
                if () in prod_tuples[i][1]:
                    continue
                if () in prod_tuples[i - 1][1]:
                    continue
                # NOTE(review): these prints look like leftover debugging
                # output — candidates for removal.
                print(prod_tuples[i - 1][1])
                print(prod_tuples[i][1])
                prod_tuples[i - 1][1].extend(prod_tuples[i][1])
                del prod_tuples[i]

        for lhs, rhss in prod_tuples:
            # NOTE(review): leftover debugging output.
            print(lhs, rhss)
            s = "%s ->" % lhs
            for rhs in rhss:
                for elt in rhs:
                    if isinstance(elt, Nonterminal):
                        s += " %s" % elt
                    else:
                        s += " %r" % elt
                s += " |"
            # Drop the trailing " |" separator before inserting the line.
            s = s[:-2] + "\n"
            self._textwidget.insert("end", s)

        self._analyze()

    #         # Add the producitons to the text widget, and colorize them.
    #         prod_by_lhs = {}
    #         for prod in self._cfg.productions():
    #             if len(prod.rhs()) > 0:
    #                 prod_by_lhs.setdefault(prod.lhs(),[]).append(prod)
    #         for (lhs, prods) in prod_by_lhs.items():
    #             self._textwidget.insert('end', '%s ->' % lhs)
    #             self._textwidget.insert('end', self._rhs(prods[0]))
    #             for prod in prods[1:]:
    #                 print '\t|'+self._rhs(prod),
    #                 self._textwidget.insert('end', '\t|'+self._rhs(prod))
    #             print
    #             self._textwidget.insert('end', '\n')
    #         for prod in self._cfg.productions():
    #             if len(prod.rhs()) == 0:
    #                 self._textwidget.insert('end', '%s' % prod)
    #         self._analyze()

    #     def _rhs(self, prod):
    #         s = ''
    #         for elt in prod.rhs():
    #             if isinstance(elt, Nonterminal): s += ' %s' % elt.symbol()
    #             else: s += ' %r' % elt
    #         return s

    def _clear_tags(self, linenum):
        """
        Remove all tags (except ``arrow`` and ``sel``) from the given
        line of the text widget used for editing the productions.
        """
        start = "%d.0" % linenum
        end = "%d.end" % linenum
        for tag in self._textwidget.tag_names():
            if tag not in ("arrow", "sel"):
                self._textwidget.tag_remove(tag, start, end)

    def _check_analyze(self, *e):
        """
        Check if we've moved to a new line.  If we have, then remove
        all colorization from the line we moved to, and re-colorize
        the line that we moved from.
        """
        linenum = int(self._textwidget.index("insert").split(".")[0])
        if linenum != self._linenum:
            self._clear_tags(linenum)
            self._analyze_line(self._linenum)
            self._linenum = linenum

    def _replace_arrows(self, *e):
        """
        Replace any ``'->'`` text strings with arrows (char \\256, in
        symbol font).  This searches the whole buffer, but is fast
        enough to be done anytime they press '>'.
        """
        # First pass: swap each literal '->' for the arrow glyph plus a tab.
        arrow = "1.0"
        while True:
            arrow = self._textwidget.search("->", arrow, "end+1char")
            if arrow == "":
                break
            self._textwidget.delete(arrow, arrow + "+2char")
            self._textwidget.insert(arrow, self.ARROW, "arrow")
            self._textwidget.insert(arrow, "\t")

        # Second pass: make sure every arrow glyph carries the 'arrow' tag.
        arrow = "1.0"
        while True:
            arrow = self._textwidget.search(self.ARROW, arrow + "+1char",
                                            "end+1char")
            if arrow == "":
                break
            self._textwidget.tag_add("arrow", arrow, arrow + "+1char")

    def _analyze_token(self, match, linenum):
        """
        Given a line number and a regexp match for a token on that
        line, colorize the token.  Note that the regexp match gives us
        the token's text, start index (on the line), and end index (on
        the line).
        """
        # What type of token is it?
        if match.group()[0] in "'\"":
            tag = "terminal"
        elif match.group() in ("->", self.ARROW):
            tag = "arrow"
        else:
            # If it's a nonterminal, then set up new bindings, so we
            # can highlight all instances of that nonterminal when we
            # put the mouse over it.
            tag = "nonterminal_" + match.group()
            if tag not in self._textwidget.tag_names():
                self._init_nonterminal_tag(tag)

        start = "%d.%d" % (linenum, match.start())
        end = "%d.%d" % (linenum, match.end())
        self._textwidget.tag_add(tag, start, end)

    def _init_nonterminal_tag(self, tag, foreground="blue"):
        """
        Configure a per-nonterminal tag, and (optionally) bind mouse
        enter/leave events so all occurrences highlight together.
        """
        self._textwidget.tag_config(tag,
                                    foreground=foreground,
                                    font=CFGEditor._BOLD)
        if not self._highlight_matching_nonterminals:
            return

        def enter(e, textwidget=self._textwidget, tag=tag):
            textwidget.tag_config(tag, background="#80ff80")

        def leave(e, textwidget=self._textwidget, tag=tag):
            textwidget.tag_config(tag, background="")

        self._textwidget.tag_bind(tag, "<Enter>", enter)
        self._textwidget.tag_bind(tag, "<Leave>", leave)

    def _analyze_line(self, linenum):
        """
        Colorize a given line.
        """
        # Get rid of any tags that were previously on the line.
        self._clear_tags(linenum)

        # Get the line line's text string.
        line = self._textwidget.get(
            repr(linenum) + ".0",
            repr(linenum) + ".end")

        # If it's a valid production, then colorize each token.
        if CFGEditor._PRODUCTION_RE.match(line):
            # It's valid; Use _TOKEN_RE to tokenize the production,
            # and call analyze_token on each token.
            def analyze_token(match, self=self, linenum=linenum):
                self._analyze_token(match, linenum)
                return ""

            CFGEditor._TOKEN_RE.sub(analyze_token, line)
        elif line.strip() != "":
            # It's invalid; show the user where the error is.
            self._mark_error(linenum, line)

    def _mark_error(self, linenum, line):
        """
        Mark the location of an error in a line.
        """
        arrowmatch = CFGEditor._ARROW_RE.search(line)
        if not arrowmatch:
            # If there's no arrow at all, highlight the whole line.
            start = "%d.0" % linenum
            end = "%d.end" % linenum
        elif not CFGEditor._LHS_RE.match(line):
            # Otherwise, if the LHS is bad, highlight it.
            start = "%d.0" % linenum
            end = "%d.%d" % (linenum, arrowmatch.start())
        else:
            # Otherwise, highlight the RHS.
            start = "%d.%d" % (linenum, arrowmatch.end())
            end = "%d.end" % linenum

        # If we're highlighting 0 chars, highlight the whole line.
        if self._textwidget.compare(start, "==", end):
            start = "%d.0" % linenum
            end = "%d.end" % linenum
        self._textwidget.tag_add("error", start, end)

    def _analyze(self, *e):
        """
        Replace ``->`` with arrows, and colorize the entire buffer.
        """
        self._replace_arrows()
        numlines = int(self._textwidget.index("end").split(".")[0])
        for linenum in range(1, numlines + 1):  # line numbers start at 1.
            self._analyze_line(linenum)

    def _parse_productions(self):
        """
        Parse the current contents of the textwidget buffer, to create
        a list of productions.
        """
        productions = []

        # Get the text, normalize it, and split it into lines.
        text = self._textwidget.get("1.0", "end")
        text = re.sub(self.ARROW, "->", text)
        text = re.sub("\t", " ", text)
        lines = text.split("\n")

        # Convert each line to a CFG production
        for line in lines:
            line = line.strip()
            if line == "":
                continue
            productions += _read_cfg_production(line)
            # if line.strip() == '': continue
            # if not CFGEditor._PRODUCTION_RE.match(line):
            #    raise ValueError('Bad production string %r' % line)
            #
            # (lhs_str, rhs_str) = line.split('->')
            # lhs = Nonterminal(lhs_str.strip())
            # rhs = []
            # def parse_token(match, rhs=rhs):
            #    token = match.group()
            #    if token[0] in "'\"": rhs.append(token[1:-1])
            #    else: rhs.append(Nonterminal(token))
            #    return ''
            # CFGEditor._TOKEN_RE.sub(parse_token, rhs_str)
            #
            # productions.append(Production(lhs, *rhs))

        return productions

    def _destroy(self, *e):
        """Tear down the top-level window (idempotent)."""
        if self._top is None:
            return
        self._top.destroy()
        self._top = None

    def _ok(self, *e):
        """Apply the edits, then close the editor."""
        self._apply()
        self._destroy()

    def _apply(self, *e):
        """Parse the buffer into a CFG and hand it to the callback."""
        productions = self._parse_productions()
        start = Nonterminal(self._start.get())
        cfg = CFG(start, productions)
        if self._set_cfg_callback is not None:
            self._set_cfg_callback(cfg)

    def _reset(self, *e):
        """Discard edits, restoring the buffer from the original grammar."""
        self._textwidget.delete("1.0", "end")
        for production in self._cfg.productions():
            self._textwidget.insert("end", "%s\n" % production)
        self._analyze()
        if self._set_cfg_callback is not None:
            self._set_cfg_callback(self._cfg)

    def _cancel(self, *e):
        """Revert to the original grammar (best-effort) and close."""
        try:
            self._reset()
        # NOTE(review): bare except silently swallows any reset failure —
        # deliberate best-effort, but consider narrowing to Exception.
        except:
            pass
        self._destroy()

    def _help(self, *e):
        """Show the help text in a ShowText window."""
        # The default font's not very legible; try using 'fixed' instead.
        try:
            ShowText(
                self._parent,
                "Help: Chart Parser Demo",
                (_CFGEditor_HELP).strip(),
                width=75,
                font="fixed",
            )
        # NOTE(review): bare except — falls back to the default font when
        # 'fixed' is unavailable; consider narrowing the exception type.
        except:
            ShowText(
                self._parent,
                "Help: Chart Parser Demo",
                (_CFGEditor_HELP).strip(),
                width=75,
            )
Example #5
0
# Read parsed sentences (blocks of non-blank lines form one tree; lines
# starting with '#' are comments) and collect their productions, both raw
# and after conversion to Chomsky normal form.
s = ''
# Bug fix: `prod` and `prod_cnf` were used below (`prod += ...`) without
# ever being initialized, which raises NameError on the first tree.
prod = []      # productions from the original trees
prod_cnf = []  # productions after CNF conversion + unary collapse

print('Building tree from parsed sentences')
with open('parsed_sentences.txt') as f:
    # The trailing '' sentinel guarantees the final accumulated tree is
    # flushed even when the file does not end with a blank line.
    sentences = list(f) + ['']
    for line in sentences:
        line = line.strip()
        if len(line) > 0:
            if line[0] != '#':
                s += line
        elif len(s) > 0:
            # A blank line ends the current tree: parse and harvest it.
            t = tree.Tree.fromstring(s)
            prod += t.productions()
            # Transform in place, then harvest the CNF productions.
            t.chomsky_normal_form()
            t.collapse_unary(collapsePOS=True)
            prod_cnf += t.productions()
            s = ''

# Deduplicate the collected productions.
prod = set(prod)
prod_cnf = set(prod_cnf)

print('Writing CFG to file with %d productions' % len(prod))
grammar = CFG(Nonterminal('ROOT'), prod)
with open('grammar.cfg', 'w') as f:
    f.write('\n'.join([str(p) for p in grammar.productions()]))

print('Writing CFG (CNF) to file with %d productions' % len(prod_cnf))
grammar_cnf = CFG(Nonterminal('ROOT'), prod_cnf)
with open('grammar_cnf.cfg', 'w') as f:
    f.write('\n'.join([str(p) for p in grammar_cnf.productions()]))
Example #6
0
class CFGEditor(object):
    """
    A dialog window for creating and editing context free grammars.
    ``CFGEditor`` imposes the following restrictions:

    - All nonterminals must be strings consisting of word
      characters.
    - All terminals must be strings consisting of word characters
      and space characters.
    """

    # Regular expressions used by _analyze_line.  Precompile them, so
    # we can process the text faster.
    ARROW = SymbolWidget.SYMBOLS['rightarrow']
    _LHS_RE = re.compile(r"(^\s*\w+\s*)(->|(" + ARROW + "))")
    _ARROW_RE = re.compile("\s*(->|(" + ARROW + "))\s*")
    _PRODUCTION_RE = re.compile(
        r"(^\s*\w+\s*)"
        + "(->|("  # LHS
        + ARROW
        + "))\s*"
        + r"((\w+|'[\w ]*'|\"[\w ]*\"|\|)\s*)*$"  # arrow
    )  # RHS
    _TOKEN_RE = re.compile("\\w+|->|'[\\w ]+'|\"[\\w ]+\"|(" + ARROW + ")")
    _BOLD = ('helvetica', -12, 'bold')

    def __init__(self, parent, cfg=None, set_cfg_callback=None):
        self._parent = parent
        if cfg is not None:
            self._cfg = cfg
        else:
            self._cfg = CFG(Nonterminal('S'), [])
        self._set_cfg_callback = set_cfg_callback

        self._highlight_matching_nonterminals = 1

        # Create the top-level window.
        self._top = Toplevel(parent)
        self._init_bindings()

        self._init_startframe()
        self._startframe.pack(side='top', fill='x', expand=0)
        self._init_prodframe()
        self._prodframe.pack(side='top', fill='both', expand=1)
        self._init_buttons()
        self._buttonframe.pack(side='bottom', fill='x', expand=0)

        self._textwidget.focus()

    def _init_startframe(self):
        frame = self._startframe = Frame(self._top)
        self._start = Entry(frame)
        self._start.pack(side='right')
        Label(frame, text='Start Symbol:').pack(side='right')
        Label(frame, text='Productions:').pack(side='left')
        self._start.insert(0, self._cfg.start().symbol())

    def _init_buttons(self):
        frame = self._buttonframe = Frame(self._top)
        Button(frame, text='Ok', command=self._ok, underline=0, takefocus=0).pack(
            side='left'
        )
        Button(frame, text='Apply', command=self._apply, underline=0, takefocus=0).pack(
            side='left'
        )
        Button(frame, text='Reset', command=self._reset, underline=0, takefocus=0).pack(
            side='left'
        )
        Button(
            frame, text='Cancel', command=self._cancel, underline=0, takefocus=0
        ).pack(side='left')
        Button(frame, text='Help', command=self._help, underline=0, takefocus=0).pack(
            side='right'
        )

    def _init_bindings(self):
        self._top.title('CFG Editor')
        self._top.bind('<Control-q>', self._cancel)
        self._top.bind('<Alt-q>', self._cancel)
        self._top.bind('<Control-d>', self._cancel)
        # self._top.bind('<Control-x>', self._cancel)
        self._top.bind('<Alt-x>', self._cancel)
        self._top.bind('<Escape>', self._cancel)
        # self._top.bind('<Control-c>', self._cancel)
        self._top.bind('<Alt-c>', self._cancel)

        self._top.bind('<Control-o>', self._ok)
        self._top.bind('<Alt-o>', self._ok)
        self._top.bind('<Control-a>', self._apply)
        self._top.bind('<Alt-a>', self._apply)
        self._top.bind('<Control-r>', self._reset)
        self._top.bind('<Alt-r>', self._reset)
        self._top.bind('<Control-h>', self._help)
        self._top.bind('<Alt-h>', self._help)
        self._top.bind('<F1>', self._help)

    def _init_prodframe(self):
        self._prodframe = Frame(self._top)

        # Create the basic Text widget & scrollbar.
        self._textwidget = Text(
            self._prodframe, background='#e0e0e0', exportselection=1
        )
        self._textscroll = Scrollbar(self._prodframe, takefocus=0, orient='vertical')
        self._textwidget.config(yscrollcommand=self._textscroll.set)
        self._textscroll.config(command=self._textwidget.yview)
        self._textscroll.pack(side='right', fill='y')
        self._textwidget.pack(expand=1, fill='both', side='left')

        # Initialize the colorization tags.  Each nonterminal gets its
        # own tag, so they aren't listed here.
        self._textwidget.tag_config('terminal', foreground='#006000')
        self._textwidget.tag_config('arrow', font='symbol')
        self._textwidget.tag_config('error', background='red')

        # Keep track of what line they're on.  We use that to remember
        # to re-analyze a line whenever they leave it.
        self._linenum = 0

        # Expand "->" to an arrow.
        self._top.bind('>', self._replace_arrows)

        # Re-colorize lines when appropriate.
        self._top.bind('<<Paste>>', self._analyze)
        self._top.bind('<KeyPress>', self._check_analyze)
        self._top.bind('<ButtonPress>', self._check_analyze)

        # Tab cycles focus. (why doesn't this work??)
        def cycle(e, textwidget=self._textwidget):
            textwidget.tk_focusNext().focus()

        self._textwidget.bind('<Tab>', cycle)

        prod_tuples = [(p.lhs(), [p.rhs()]) for p in self._cfg.productions()]
        for i in range(len(prod_tuples) - 1, 0, -1):
            if prod_tuples[i][0] == prod_tuples[i - 1][0]:
                if () in prod_tuples[i][1]:
                    continue
                if () in prod_tuples[i - 1][1]:
                    continue
                print(prod_tuples[i - 1][1])
                print(prod_tuples[i][1])
                prod_tuples[i - 1][1].extend(prod_tuples[i][1])
                del prod_tuples[i]

        for lhs, rhss in prod_tuples:
            print(lhs, rhss)
            s = '%s ->' % lhs
            for rhs in rhss:
                for elt in rhs:
                    if isinstance(elt, Nonterminal):
                        s += ' %s' % elt
                    else:
                        s += ' %r' % elt
                s += ' |'
            s = s[:-2] + '\n'
            self._textwidget.insert('end', s)

        self._analyze()

    #         # Add the producitons to the text widget, and colorize them.
    #         prod_by_lhs = {}
    #         for prod in self._cfg.productions():
    #             if len(prod.rhs()) > 0:
    #                 prod_by_lhs.setdefault(prod.lhs(),[]).append(prod)
    #         for (lhs, prods) in prod_by_lhs.items():
    #             self._textwidget.insert('end', '%s ->' % lhs)
    #             self._textwidget.insert('end', self._rhs(prods[0]))
    #             for prod in prods[1:]:
    #                 print '\t|'+self._rhs(prod),
    #                 self._textwidget.insert('end', '\t|'+self._rhs(prod))
    #             print
    #             self._textwidget.insert('end', '\n')
    #         for prod in self._cfg.productions():
    #             if len(prod.rhs()) == 0:
    #                 self._textwidget.insert('end', '%s' % prod)
    #         self._analyze()

    #     def _rhs(self, prod):
    #         s = ''
    #         for elt in prod.rhs():
    #             if isinstance(elt, Nonterminal): s += ' %s' % elt.symbol()
    #             else: s += ' %r' % elt
    #         return s

    def _clear_tags(self, linenum):
        """
        Remove all tags (except ``arrow`` and ``sel``) from the given
        line of the text widget used for editing the productions.
        """
        start = '%d.0' % linenum
        end = '%d.end' % linenum
        for tag in self._textwidget.tag_names():
            if tag not in ('arrow', 'sel'):
                self._textwidget.tag_remove(tag, start, end)

    def _check_analyze(self, *e):
        """
        Check if we've moved to a new line.  If we have, then remove
        all colorization from the line we moved to, and re-colorize
        the line that we moved from.
        """
        linenum = int(self._textwidget.index('insert').split('.')[0])
        if linenum != self._linenum:
            self._clear_tags(linenum)
            self._analyze_line(self._linenum)
            self._linenum = linenum

    def _replace_arrows(self, *e):
        """
        Replace any ``'->'`` text strings with arrows (char \\256, in
        symbol font).  This searches the whole buffer, but is fast
        enough to be done anytime they press '>'.
        """
        arrow = '1.0'
        while True:
            arrow = self._textwidget.search('->', arrow, 'end+1char')
            if arrow == '':
                break
            self._textwidget.delete(arrow, arrow + '+2char')
            self._textwidget.insert(arrow, self.ARROW, 'arrow')
            self._textwidget.insert(arrow, '\t')

        arrow = '1.0'
        while True:
            arrow = self._textwidget.search(self.ARROW, arrow + '+1char', 'end+1char')
            if arrow == '':
                break
            self._textwidget.tag_add('arrow', arrow, arrow + '+1char')

    def _analyze_token(self, match, linenum):
        """
        Given a line number and a regexp match for a token on that
        line, colorize the token.  Note that the regexp match gives us
        the token's text, start index (on the line), and end index (on
        the line).
        """
        # What type of token is it?
        if match.group()[0] in "'\"":
            tag = 'terminal'
        elif match.group() in ('->', self.ARROW):
            tag = 'arrow'
        else:
            # If it's a nonterminal, then set up new bindings, so we
            # can highlight all instances of that nonterminal when we
            # put the mouse over it.
            tag = 'nonterminal_' + match.group()
            if tag not in self._textwidget.tag_names():
                self._init_nonterminal_tag(tag)

        start = '%d.%d' % (linenum, match.start())
        end = '%d.%d' % (linenum, match.end())
        self._textwidget.tag_add(tag, start, end)

    def _init_nonterminal_tag(self, tag, foreground='blue'):
        self._textwidget.tag_config(tag, foreground=foreground, font=CFGEditor._BOLD)
        if not self._highlight_matching_nonterminals:
            return

        def enter(e, textwidget=self._textwidget, tag=tag):
            textwidget.tag_config(tag, background='#80ff80')

        def leave(e, textwidget=self._textwidget, tag=tag):
            textwidget.tag_config(tag, background='')

        self._textwidget.tag_bind(tag, '<Enter>', enter)
        self._textwidget.tag_bind(tag, '<Leave>', leave)

    def _analyze_line(self, linenum):
        """
        Colorize a given line.
        """
        # Get rid of any tags that were previously on the line.
        self._clear_tags(linenum)

        # Get the line line's text string.
        line = self._textwidget.get(repr(linenum) + '.0', repr(linenum) + '.end')

        # If it's a valid production, then colorize each token.
        if CFGEditor._PRODUCTION_RE.match(line):
            # It's valid; Use _TOKEN_RE to tokenize the production,
            # and call analyze_token on each token.
            def analyze_token(match, self=self, linenum=linenum):
                self._analyze_token(match, linenum)
                return ''

            CFGEditor._TOKEN_RE.sub(analyze_token, line)
        elif line.strip() != '':
            # It's invalid; show the user where the error is.
            self._mark_error(linenum, line)

    def _mark_error(self, linenum, line):
        """
        Highlight the portion of ``line`` that contains an error.
        """
        arrowmatch = CFGEditor._ARROW_RE.search(line)
        if not arrowmatch:
            # No arrow at all: the whole line is suspect.
            start, end = '%d.0' % linenum, '%d.end' % linenum
        elif not CFGEditor._LHS_RE.match(line):
            # Bad left-hand side: highlight everything before the arrow.
            start = '%d.0' % linenum
            end = '%d.%d' % (linenum, arrowmatch.start())
        else:
            # Otherwise the problem must be in the right-hand side.
            start = '%d.%d' % (linenum, arrowmatch.end())
            end = '%d.end' % linenum

        # A zero-width highlight would be invisible; fall back to the
        # whole line in that case.
        if self._textwidget.compare(start, '==', end):
            start, end = '%d.0' % linenum, '%d.end' % linenum
        self._textwidget.tag_add('error', start, end)

    def _analyze(self, *e):
        """
        Replace ``->`` with arrows, and colorize the entire buffer.
        """
        self._replace_arrows()
        last_line = int(self._textwidget.index('end').split('.')[0])
        # Tkinter text indices are 1-based, so start at line 1.
        for line_no in range(1, last_line + 1):
            self._analyze_line(line_no)

    def _parse_productions(self):
        """
        Parse the current contents of the textwidget buffer, to create
        a list of productions.

        :rtype: list
        :return: The CFG productions described by the buffer, one or more
            per non-blank line.
        """
        productions = []

        # Get the text, normalize it, and split it into lines.
        text = self._textwidget.get('1.0', 'end')
        text = re.sub(self.ARROW, '->', text)  # unicode arrow -> ASCII '->'
        text = re.sub('\t', ' ', text)
        lines = text.split('\n')

        # Convert each non-blank line to CFG productions.  A single line may
        # describe several productions (e.g. "S -> A | B").
        for line in lines:
            line = line.strip()
            if line == '':
                continue
            productions += _read_cfg_production(line)

        return productions

    def _destroy(self, *e):
        # Tear down the editor window; safe to call more than once
        # (subsequent calls are no-ops because _top is cleared).
        if self._top is not None:
            self._top.destroy()
            self._top = None

    def _ok(self, *e):
        # Commit the edited grammar, then close the editor window.
        self._apply()
        self._destroy()

    def _apply(self, *e):
        # Build a CFG from the buffer contents and hand it to the callback.
        prods = self._parse_productions()
        grammar = CFG(Nonterminal(self._start.get()), prods)
        if self._set_cfg_callback is not None:
            self._set_cfg_callback(grammar)

    def _reset(self, *e):
        # Throw away any edits: repopulate the buffer from the original
        # grammar, recolorize it, and notify the callback.
        self._textwidget.delete('1.0', 'end')
        for production in self._cfg.productions():
            self._textwidget.insert('end', '%s\n' % production)
        self._analyze()
        if self._set_cfg_callback is not None:
            self._set_cfg_callback(self._cfg)

    def _cancel(self, *e):
        """Discard edits: restore the original grammar, then close."""
        try:
            self._reset()
        except Exception:
            # Best-effort: still close the window even if reset fails.
            # (Was a bare ``except:``, which would also swallow
            # KeyboardInterrupt and SystemExit.)
            pass
        self._destroy()

    def _help(self, *e):
        """Display the editor's help text in a ShowText window."""
        # The default font's not very legible; try using 'fixed' instead.
        try:
            ShowText(
                self._parent,
                'Help: Chart Parser Demo',
                (_CFGEditor_HELP).strip(),
                width=75,
                font='fixed',
            )
        except Exception:
            # 'fixed' may not exist on this platform; fall back to the
            # default font.  (Was a bare ``except:``, which would also
            # swallow KeyboardInterrupt and SystemExit.)
            ShowText(
                self._parent,
                'Help: Chart Parser Demo',
                (_CFGEditor_HELP).strip(),
                width=75,
            )
            )  # add ROOT tag back at the beginning of the tree, and output a tuple with tree and the stimulus ID (num)

"""Fix up the last tree in stimulus 0.. stimulus 0 excluded the last IU (="stating that") in the speaker's turn, to make it fit with the desired IU count. Those two words were included in the text given to the parser in case the omission would have caused the parser difficulty, but we don't want to include them in our analysis since the subjects didn't actually hear them.. I removed those two words from the stanford parse tree text file that we read in earlier, but now I need to add in the final parentheses to make the parse processable by the Tree function
"""
stimtrees[5] = (stimtrees[5][0] + ")))))))\n(. ?))\n(. .)))\n\n", stimtrees[5][1])

processed_trees = [
    Tree.fromstring(tree[0]) for tree in stimtrees
]  # create Tree structure and viewable tree image for each tree
processed_trees[0]  # shows tree image for stimulus 0
# prods=[t.productions() for t in processed_trees]
rules = reduce(lambda x, y: x + y, [t.productions() for t in processed_trees])
mycfg = CFG(Nonterminal("ROOT"), rules)
mycfg.start()
mycfg.productions(
    lhs=Nonterminal("PP")
)  # Will print productions for the specified nonterminal item (e.g. "PP", a prepositional phrase), where the PP is the left-hand side of the rule (e.g. PP -> whatever)

#%%
# ==============================================================================
# Loop through Production rules to extract Syntactic Tags and Terminal Words, keep track of Clause boundaries by looking for the first word appearing after an "S" tag
# ==============================================================================
words = []
counter = 0
tags = []
ruleset = []
ClauseBoundary = (
    False
)  # below, this variable will be set to TRUE if the rule begins with 'S' (clause boundary), or FALSE the rule contains a terminal
for rule in rules:
    print counter
Example #8
0
class PrimalLearner(Learner):
    """
        Implementation of the primal algorithm of Yoshinaka (2011).
    """
    def __init__(self, text, oracle, k):
        """
        Initialize from a Text and an Oracle.

        :type text: oracles.Text
        :param text: A text

        :type oracle: oracles.Oracle
        :param oracle: An oracle

        :type k: int
        :param k: The grammar learned will have the k-FKP.
        """
        super(PrimalLearner, self).__init__()
        self._text = text
        self._oracle = oracle
        self._k = k

        # Algorithm state
        self._data = SentenceSet([])  # sentences observed so far
        self._substrings = SentenceSet([])  # substrings of observed sentences
        self._contexts = ContextSet([])  # contexts extracted from observations
        self._eliminated_rules = set()  # rules ruled out by failed context checks
        self._num_steps = 0  # number of guess() calls processed

        self._verbose = False

        # Current guess
        self._name_ctr = 0  # counter backing _new_name()
        self._kernels = []
        self._nonterminals = dict()  # kernel (SentenceSet) -> Nonterminal
        self._nt_contexts = dict()  # Nonterminal -> ContextSet
        self._terminals = set()  # terminal symbols seen so far
        self._productions = set()  # productions of the current guess
        self._start_symbol = Nonterminal("start")
        self._curr_guess = None  # CFG, or None before the first guess
        self._curr_guess_parser = None  # ChartParser for _curr_guess

    def _new_name(self):
        """
        Generates a unique name.

        :rtype: str
        :return: A unique name (successive decimal integers, as strings).
            Note: the previous docstring claimed an int return, but the
            value is always a string.
        """
        self._name_ctr += 1
        return str(self._name_ctr - 1)

    def _log(self, message):
        # Print progress messages only when verbose mode is enabled.
        # ``print(message)`` with a single argument behaves identically
        # under Python 2 (parenthesized expression) and Python 3; the old
        # ``print message`` statement was Python-2-only.
        if self._verbose:
            print(message)

    def guess(self, verbose=None):
        """
        Makes a guess based on the next observation.
        Updates self._curr_guess.

        Python 2/3 portability fixes: ``dict.iteritems()`` replaced by
        ``dict.items()`` (identical iteration behavior in both versions),
        ``/`` replaced by floor division ``//`` where integer division is
        intended, and the bare ``except:`` narrowed to ``except Exception``.

        :rtype: CFG
        :returns: The next guess
        """
        if verbose is not None:
            self._verbose = verbose

        sentence = Sentence(next(self._text))
        self._num_steps += 1
        self._log("String {}: {}".format(self._num_steps, sentence))

        if sentence in self._data:
            self._log("String already seen")
            return self._curr_guess

        # Info from previous guess (subtract 1 for the start symbol)
        num_contexts = len(self._contexts)
        num_subs = len(self._substrings)
        if self._curr_guess is not None:
            num_nts = len(set(p.lhs()
                              for p in self._curr_guess.productions())) - 1
        else:
            num_nts = 0

        total_timer = Timer()
        total_timer.start()

        # Update data and terminals
        words = sentence.get_words()
        self._data.add(sentence)
        self._terminals.update(set(words))

        # Update contexts: every (prefix, suffix) split of the sentence
        self._log("Updating contexts...")
        inds = range(0, len(words) + 1)
        contexts = [
            Context(words[:i], words[j:]) for i in inds for j in inds[i:]
        ]
        self._contexts.update(ContextSet(contexts))
        self._log(
            "{} new contexts added".format(len(self._contexts) - num_contexts))

        # Update substrings, but only if the current guess does not already
        # generate this sentence.
        self._log("Updating substrings...")

        is_new_sentence = True
        if self._curr_guess_parser is not None:
            try:
                parses = self._curr_guess_parser.parse(words)
                is_new_sentence = len(list(parses)) == 0
            except Exception:
                # Parsing can fail (e.g. on unseen terminals); treat the
                # sentence as new in that case.
                is_new_sentence = True

        if is_new_sentence:
            subs = [Sentence(words[i:j]) for i in inds for j in inds[i:]]
            self._substrings.update(SentenceSet(subs))
            self._log("{} new substrings added".format(
                len(self._substrings) - num_subs))
        else:
            self._log("Sentence already generated by current guess")

        # Construct the nonterminals: one per kernel (subset of substrings
        # of size at most k).
        self._log("Constructing nonterminals...")

        kernels = set()
        for i in range(1, self._k + 1):
            subsets = [
                SentenceSet(j) for j in combinations(self._substrings, i)
            ]
            kernels.update(subsets)

        for kernel in kernels:
            if kernel not in self._nonterminals:
                nt_name = self._new_name()
                contexts = self._oracle.restr_right_triangle(
                    kernel, self._contexts)
                nt = Nonterminal(nt_name)
                self._nonterminals[kernel] = nt
                self._nt_contexts[nt] = contexts

        # Get a set of nonterminals with unique contexts
        self._log("Removing equivalent nonterminals...")
        context_nts = {con: nt for nt, con in self._nt_contexts.items()}
        self._log(
            "{} nonterminals removed".format(len(kernels) - len(context_nts)))
        self._log("{} new nonterminals constructed".format(
            len(context_nts) - num_nts))

        # Construct the rules
        self._log("Constructing rules...")
        self._productions = set()
        timer = Timer()

        # Lexical rules: nt -> terminal
        timer.start()
        for t in self._terminals:
            t_kernel = SentenceSet([Sentence([t])])
            t_nt = self._nonterminals[t_kernel]
            t_contexts = self._nt_contexts[t_nt]

            for contexts, nt in context_nts.items():
                rule = Production(nt, [t])
                if rule in self._productions:
                    continue
                if rule in self._eliminated_rules:
                    continue

                if contexts.issubset(t_contexts):
                    self._productions.add(rule)
                else:
                    self._eliminated_rules.add(rule)

        timer.stop()
        num_lex = len(self._productions)
        self._log("{} lexical rules ({:.2f} secs)".format(
            num_lex, timer.elapsed()))

        # Binary rules: nt -> nt_l nt_r
        timer.reset()
        timer.start()
        for kernel_l in self._nonterminals:
            for kernel_r in self._nonterminals:
                kernel_rhs = kernel_l + kernel_r
                sents_rhs = list(kernel_rhs.intersection(self._substrings))

                # Partition sents_rhs into chunks of size at most k.
                # Floor division (``//``) matches the Python 2 ``/`` on ints.
                inds = range(len(sents_rhs) // self._k + 1)
                kers_rhs = [
                    sents_rhs[self._k * i:self._k * (i + 1)] for i in inds
                ]
                kers_rhs = [SentenceSet(k) for k in kers_rhs if len(k) > 0]

                nts_rhs = [self._nonterminals[k] for k in kers_rhs]
                contexts_nts_rhs = [self._nt_contexts[nt] for nt in nts_rhs]
                if len(contexts_nts_rhs) > 0:
                    contexts_rhs = contexts_nts_rhs[0].intersection(
                        *contexts_nts_rhs)
                else:
                    contexts_rhs = self._contexts

                # Membership queries
                new_strs_rhs = kernel_rhs.difference(SentenceSet(sents_rhs))
                new_contexts_rhs = self._oracle.restr_right_triangle(
                    new_strs_rhs, contexts_rhs)
                contexts_rhs.intersection_update(new_contexts_rhs)

                # Building the rules
                for contexts, nt in context_nts.items():
                    nt_l = context_nts[self._nt_contexts[
                        self._nonterminals[kernel_l]]]
                    nt_r = context_nts[self._nt_contexts[
                        self._nonterminals[kernel_r]]]
                    rule = Production(nt, [nt_l, nt_r])
                    if rule in self._productions:
                        continue
                    if rule in self._eliminated_rules:
                        continue

                    if contexts.issubset(contexts_rhs):
                        self._productions.add(rule)
                    else:
                        self._eliminated_rules.add(rule)

        timer.stop()
        num_bin = len(self._productions) - num_lex
        self._log("{} binary rules ({:.2f} secs)".format(
            num_bin, timer.elapsed()))

        # Start rules: start -> nt, for nonterminals whose context set
        # contains the empty context.
        timer.reset()
        timer.start()
        for contexts, nt in context_nts.items():
            rule = Production(self._start_symbol, [nt])
            if rule in self._productions:
                continue
            if rule in self._eliminated_rules:
                continue
            if Context([], []) in contexts:
                self._productions.add(rule)
            else:
                self._eliminated_rules.add(rule)

        timer.stop()
        num_start = len(self._productions) - num_lex - num_bin
        self._log("{} start rules ({:.2f} secs)".format(
            num_start, timer.elapsed()))

        # Construct the grammar
        self._curr_guess = CFG(self._start_symbol, self._productions)
        self._curr_guess_parser = ChartParser(self._curr_guess)

        total_timer.stop()
        elapsed = total_timer.elapsed()
        num_rules = len(self._curr_guess.productions())
        self._log("Constructed grammar with {} rules ({:.2f} secs)".format(
            num_rules, elapsed))

        return self._curr_guess

    def save_as(self, filename, verbose=False):
        """
        Saves this PrimalLearner object to a file.

        Bug fix: the original called ``pickle.dump(self, filename)``, passing
        the filename string where pickle expects an open file object; the
        open handle ``f`` was never written to.

        :type filename: str
        :param filename: The name of the file to save to

        :type verbose: bool
        :param verbose: If true, information will be printed
            (currently unused; kept for interface compatibility)

        :return: None
        """
        # The parser is not picklable, so detach it during the dump and
        # restore it afterwards, even if pickling fails.
        parser = self._curr_guess_parser
        self._curr_guess_parser = None
        try:
            with open(filename, "wb") as f:
                pickle.dump(self, f)
        finally:
            self._curr_guess_parser = parser

    @staticmethod
    def from_grammar(grammar, k):
        """
        Build a PrimalLearner whose text and oracle are both derived
        from the given grammar.

        :type grammar: CFG
        :param grammar: A grammar

        :type k: int
        :param k: The grammar learned will have the k-FKP.

        :rtype: PrimalLearner
        :return: A PrimalLearner
        """
        return PrimalLearner(
            oracles.GrammarText(grammar), oracles.GrammarOracle(grammar), k
        )