Example #1
 def read_productions(self, productions_filename):
     productions = []
     with io.open(productions_filename, 'r', encoding='utf8') as f:
         for line in f:
             line = line.strip()
             components = line.split(u'+')
             lhs = Nonterminal(components[0])
             rhs = tuple([
                 Nonterminal(nt.strip()) for nt in components[1].split(u' ')
             ])
             prob = float(components[2])
             pp = ProbabilisticProduction(lhs, rhs, prob=prob)
             productions.append(pp)
     self.grammar = PCFG(Nonterminal('S'), productions)
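
For reference, the reader above expects one production per line, with the left-hand side, the right-hand side, and the probability separated by '+'. A hypothetical productions file (note that every right-hand-side symbol is wrapped in Nonterminal, so this particular reader only handles nonterminal RHS symbols):

S+NP VP+1.0
NP+DT NN+0.6
NP+PRP+0.4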
Example #2
File: pcky.py Project: zyocum/pcfg
 def load_grammar(self, grammar_path):
     """Returns a PCFG from the specified file."""
     with open(grammar_path, 'r') as f:
         pcfg = PCFG.fromstring(f.read())
         if not pcfg.is_chomsky_normal_form():
             raise ValueError("grammar not in Chomsky normal form")
         return pcfg
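
As a minimal sketch (not part of the original project), here is a grammar string that would pass the Chomsky-normal-form check above, since every rule is either A -> B C or A -> 'terminal':

from nltk import PCFG

cnf = PCFG.fromstring("""
S -> A B [1.0]
A -> 'a' [1.0]
B -> 'b' [1.0]
""")
assert cnf.is_chomsky_normal_form()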
Example #3
File: cky.py Project: BabisK/M36209P
def main():
    parser = argparse.ArgumentParser(description='CKY and PCKY')
    parser.add_argument('-g',
                        '--grammar',
                        help='Input file name',
                        required=True)
    parser.add_argument('-s',
                        '--sentence',
                        help='Input sentence',
                        required=True)
    args = parser.parse_args()

    with open(args.grammar, 'r') as f:
        grammar_text = f.read()

    grammar = None
    result = None
    try:
        grammar = CFG.fromstring(grammar_text)
    except ValueError:
        grammar = PCFG.fromstring(grammar_text)

    # use exact type checks here: PCFG is a subclass of CFG,
    # so isinstance would match both
    if type(grammar) is CFG:
        result = cky(args.sentence, grammar)
    elif type(grammar) is PCFG:
        result = pcky(args.sentence, grammar)
Example #4
def test_model():
    grammar_str = "S -> 'c' '*' 'x' [1.0]"
    grammar = PCFG.fromstring(grammar_str)
    parse_tree_code = "0"
    expression_str = "c*x"
    probability = 1.0
    symbols_params = ["c"]
    symbols_variables = ["x"]
    
    model = Model(expr=expression_str,
                  grammar=grammar,
                  code=parse_tree_code,
                  p=probability,
                  sym_params=symbols_params,
                  sym_vars=symbols_variables)

    assert str(model) == expression_str
    
    assert model.get_error() == 10**8
    
    result = {"x":[1.2], "fun":0.001}
    model.set_estimated(result)
    
    assert str(model.full_expr(*model.params)) == "1.2*x"
    
    X = np.reshape(np.linspace(0, 5, 2), (2, 1))
    y = model.evaluate(X, *model.params)

    assert isinstance(y, np.ndarray)
    assert sum((y - np.array([0, 6.0]))**2) < 1e-15
Example #5
def main(config):
    grammar_string = parse_induced_grammar( config.grammar )

    if config.output:
        with open(config.output, 'w') as f:
            f.write(grammar_string)
    grammar = PCFG.fromstring( grammar_string )
    grammar._start = Nonterminal('TOP') # Not sure whether this is allowed or breaks things

    if config.textfile:
        # Create the directory for parse trees if it does not already exist
        os.makedirs(config.output_parse, exist_ok=True)
        parser = ViterbiParser(grammar)
        with open(config.textfile, 'r') as f:
            lines = f.read().splitlines() 
        for i, line in enumerate(lines):
            if i==config.number_parses:
                break
            print(f"Parsing sentence {i+1}")
            sent = line.split()
            for t in parser.parse(sent):
                TreeView(t)._cframe.print_to_file(f"{config.output_parse}/tree_{i}")
Example #6
def generate(grammar: PCFG, fitness_fn):
    """
    Generate an utterance from the supplied grammar, fitted to the fitness function.

    Here's an example usage to generate a simple stack of three blocks:

    >>> def fitness(_, prefix):
    ...     return len(prefix) < 3
    >>> grammar = load_grammar("block_top -> 'block' block_top|")
    >>> generate(grammar, fitness)
    ('block', 'block', 'block')
    """
    sentence = [grammar.start()]

    for i in next_nonterm(sentence):
        productions = grammar.productions(lhs=sentence[i])

        try:
            # Attempt random selection if we are dealing with probabilistic rules
            best = np.random.choice(productions,
                                    p=[p.prob() for p in productions])
        except ValueError:
            # Probabilities do not sum to 1, so we're checking against a fitness function
            best_fitness = 0.0
            best_prods = []

            for prod in productions:
                fitness = fitness_fn(terminated(grammar, prod.rhs()),
                                     prefix=tuple(sentence[0:i]))

                if fitness > best_fitness:
                    best_prods = []

                if fitness >= best_fitness:
                    best_fitness = fitness

                    best_prods.append(prod)

                    if fitness >= 1.0:
                        break

            best = best_prods[-1]

        sentence.pop(i)
        # splice the chosen production's RHS in place of the nonterminal
        for s in reversed(best.rhs()):
            sentence.insert(i, s)

    return tuple(sentence)
Example #7
 def __init__(self, grammar, depth_limit=100, repeat_limit=100, symbols=None):
     self.generator_type = "PCFG"
     self.coverage_dict = {}
     self.count_dict = {}
     self.depth_limit = depth_limit
     self.repeat_limit = repeat_limit
     # default to None to avoid sharing one mutable dict across instances
     self.symbols = symbols if symbols is not None else {}

     if isinstance(grammar, str):
         self.grammar = PCFG.fromstring(grammar)
     elif isinstance(grammar, PCFG):
         self.grammar = grammar
     else:
         raise TypeError("Unknown grammar specification.\n"
                         "Expected: string or nltk.grammar.PCFG object.\n"
                         "Input: " + str(grammar))

     self.start_symbol = self.grammar.start()
Example #8
def get_nltk_pcfg(g):
    prod_lists = [[prod for prod in g['prod'] if prod.get_lhs() == nt] for nt in g['nt']]
    #print([len(p) for p in prod_lists])
    # drop nonterminals that have no productions
    prod_lists = [p for p in prod_lists if p]
    # emit one line per left-hand side: "LHS -> rhs1 [w1] | rhs2 [w2] | ..."
    strings = '\n'.join(
        '{} -> {}'.format(
            prod_list[0].get_lhs(),
            ' | '.join('{} [{}]'.format(' '.join(prod.get_rhs()), prod.get_weight())
                       for prod in prod_list))
        for prod_list in prod_lists)
    return PCFG.fromstring(strings)
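
The builder emits one line per left-hand side in NLTK's grammar string format, i.e. alternatives separated by '|' with bracketed weights. An illustrative line (not actual output from the source project):

NP -> Det N [0.5] | NP PP [0.5]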
Example #9
def test_model_box():
    grammar_str = "S -> 'c' '*' 'x' [0.5] | 'x' [0.5]"
    grammar = PCFG.fromstring(grammar_str)
    expr1_str = "x"
    expr2_str = "c*x"
    symbols = {"x":['x'], "const":"c", "start":"S"}
    
    models = ModelBox()
    models.add_model(expr1_str, symbols, grammar)
    assert len(models) == 1
    models.add_model(expr2_str, symbols, grammar, p=0.5, code="1")
    assert len(models) == 2
    assert str(models[1]) == str(models["c0*x"])
    assert str(models[1]) == "c0*x"
    assert models[1].p == 0.5
Example #10
def pcfg_learn(treebank, n):
    productions = list()
    for i in range(n):
        for tree in treebank.parsed_sents()[:i+1]:
            chomsky_normal_form(tree, factor='right', horzMarkov=1, vertMarkov=1, childChar='|', parentChar='^')
            prod_gen = tree_to_productions(tree)
            tree_to_append = next(prod_gen)
            while tree_to_append:
                productions.append(tree_to_append)
                try:
                    tree_to_append = next(prod_gen)
                except StopIteration:
                    tree_to_append = False
    productions = get_productions(productions)
    return PCFG(Nonterminal('S'), productions)
Example #11
def pcfg_learn1(treebank, n):
    productions = list()
    for i in range(n):
        for tree in treebank.parsed_sents()[:i + 1]:
            prod_gen = tree_to_productions(tree, "BOT")
            tree_to_append = next(prod_gen)[0]
            while tree_to_append:
                if tree_to_append.lhs() == Nonterminal('NP'):
                    productions.append(tree_to_append)
                try:
                    tree_to_append = next(prod_gen)[0]
                except StopIteration:
                    tree_to_append = False
    productions, dist = get_productions(productions)
    return PCFG(Nonterminal('NP'), productions), dist
Example #12
def trial_run_with_toy_grammar():
    toy_pcfg1 = PCFG.fromstring("""
        S -> NP VP [1.0]
        NP -> Det N [0.5] | NP PP [0.25] | 'John' [0.1] | 'I' [0.15]
        Det -> 'the' [0.8] | 'my' [0.2]
        N -> 'man' [0.5] | 'telescope' [0.5]
        V -> 'ate' [0.35] | 'saw' [0.65]
        VP -> VP PP [0.1] | V NP [0.7] | 'ate' [0.07] | 'saw' [0.13]
        PP -> P NP [1.0]
        P -> 'with' [0.61] | 'under' [0.39]
        """)
    toy_pcfg1 = cc.convert_grammar(toy_pcfg1)

    myparser = cky_parser.ckyparser(toy_pcfg1, Nonterminal('S'))
    chart, mytrees = myparser.probabilistic_parse_from_sent("I saw John with my telescope")
    print(mytrees)
Example #13
def generate_sentences(args):
    grammar_string = ""

    with open(args.grammar_file, "r") as gram_file:
        grammar_string = gram_file.read()

    grammar = PCFG.fromstring(grammar_string)

    if args.verbose:
        print(grammar)
        print()

    for _ in range(args.num_sent):
        frags = []
        generate_sample(grammar, grammar.start(), frags)
        yield ' '.join(frags)
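
A hypothetical driver for the generator above (the attribute names grammar_file, num_sent, and verbose are assumed from the code; generate_sample is the helper it calls):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("grammar_file")
parser.add_argument("--num-sent", dest="num_sent", type=int, default=5)
parser.add_argument("--verbose", action="store_true")
args = parser.parse_args()

for sentence in generate_sentences(args):
    print(sentence)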
Example #14
def PCFG_Section():
    toy_pcfg1 = PCFG.fromstring("""
        S -> NP VP [1.0]
        NP -> Det N [0.5] | NP PP [0.25] | 'John' [0.1] | 'I' [0.15]
        Det -> 'the' [0.8] | 'my' [0.2]
        N -> 'man' [0.5] | 'telescope' [0.5]
        VP -> VP PP [0.1] | V NP [0.7] | V [0.2]
        V -> 'ate' [0.35] | 'saw' [0.65]
        PP -> P NP [1.0]
        P -> 'with' [0.61] | 'under' [0.39]
    """)

    pcfg_prods = toy_pcfg1.productions()

    pcfg_prod = pcfg_prods[2]
    print('A PCFG production:', pcfg_prod)
    print('pcfg_prod.lhs()  =>', pcfg_prod.lhs())
    print('pcfg_prod.rhs()  =>', pcfg_prod.rhs())
    print('pcfg_prod.prob() =>', pcfg_prod.prob())

    # extract productions from three trees and induce the PCFG
    print("Induce PCFG grammar from treebank data:")

    productions = []
    for item in treebank.fileids()[:2]:
        for tree in treebank.parsed_sents(item):
            # print(" ".join(tree.leaves()))
            # perform optional tree transformations, e.g.:
            # tree.collapse_unary(collapsePOS=False)  # collapse unary chains A-B-C into A-B+C
            # tree.chomsky_normal_form(horzMarkov=2)  # binarize A -> B C D into A -> B C+D, C+D -> C D
            prods = tree.productions()
            # print(prods[0].prob())
            productions += prods

    S = Nonterminal('S')
    grammar = induce_pcfg(S, productions)
    # print(grammar)    # This is a PCFG

    ### Parsing section below ###

    print("\nParse sentence using induced grammar:")

    parser = pchart.InsideChartParser(grammar)
    parser.trace(1)

    # parse the first treebank sentence and report each parse's probability
    # (the trees returned by parsed_sents are plain Trees with no prob())
    sent = treebank.parsed_sents('wsj_0001.mrg')[0].leaves()
    for parse in parser.parse(sent):
        print(parse.prob())
Example #15
def main():
    ''' run nltk tests '''
    print "WELCOME TO NLTK TEST"

    #sentence = ["movies", "directed", "by", "mel", "brooks"]
    #sentence = nltk.corpus.treebank.tagged_sents()[22]

    sentence = "adventure movies between 2000 and 2015 featuring performances by daniel craig"
    #sentence = "movies directed by craig"
    #sentence = "all movies where Vin Diesel acted from the 1990s"
    #sentence = "movies directed by mel brooks\r\n"
    #sentence = "action movies with Jackie Chan\r\n"
    #print(sentence)

    sentence = nltk.word_tokenize(sentence)
    print(sentence)
    #sentence = [nltk.word_tokenize(sent) for sent in sentence]
    #sentence = nltk.word_tokenize(sentence)
    #sentence = [nltk.pos_tag(sent) for sent in sentence]
    #sentence = nltk.pos_tag(sentence)
    print(sentence)


    #TV = transitive verb
    #IV = intransitive verb
    # DatV = Dative verb
    grammar = PCFG.fromstring("""
                S    -> NP VP                   [1.0]
                VP   -> TV IN NP                [0.8]
                VP   -> IV                      [0.1]
                VP   -> DatV NP NP              [0.1]
                IN   -> 'by'                    [1.0]
                TV   -> 'directed'              [1.0]
                IV   -> 'directed'              [1.0]
                DatV -> 'gave'                  [1.0]
                NP   -> 'movies'                [0.5]
                NP   -> 'craig'                 [0.5]
            """)

    #print(grammar)
    viterbi_parser = nltk.ViterbiParser(grammar)
    for tree in viterbi_parser.parse(sentence):
        print(tree)
Example #16
def create_pcfg(start_symbol, productions):
    pcount = {}
    lcount = {}

    for prod in productions:
        lcount[prod.lhs()] = lcount.get(prod.lhs(), 0) + 1
        pcount[prod] = pcount.get(prod, 0) + 1

    prods = [
        ProbabilisticProduction(p.lhs(), p.rhs(), prob=pcount[p] / lcount[p.lhs()])
        for p in pcount
    ]

    # threshold= 5e-3
    # to_remove = [p for p in prods if p.is_lexical() and len(p) == 1 and p.prob() < threshold]
    
    # if to_remove:
    #     return create_pcfg(start_symbol, [p for p in prods if p.is_lexical() and len(p) == 1 and p.prob() > threshold])

    return PCFG(start_symbol, prods)
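
A hypothetical usage, showing the relative-frequency estimate prob = pcount[p] / lcount[p.lhs()] in action:

from nltk import Nonterminal
from nltk.grammar import Production

S, NP, VP = Nonterminal('S'), Nonterminal('NP'), Nonterminal('VP')
observed = [Production(S, [NP, VP]), Production(S, [NP, VP]), Production(S, [VP])]
grammar = create_pcfg(S, observed)
# S -> NP VP receives probability 2/3, S -> VP receives 1/3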
Example #17
def load_grammar(content):
    """
  Load a grammar from a string

  This is similar to calling PCFG.from_string directly, but it does a 
  little extra parsing to make probabilities optional.

  >>> load_grammar('Start -> ').productions()
  [Start ->  [0]]
  
  >>> load_grammar("Start -> 'a' [.3] | 'b' [.7]").productions()
  [Start -> 'a' [0.3], Start -> 'b' [0.7]]
  """
    content = '\n'.join(
        map(lambda l: l if not l or l[-1] == ']' else l + ' [0]',
            content.splitlines()))

    PCFG.EPSILON = 2  # Allow probabilities to sum to nearly anything

    return PCFG.fromstring(content)
Example #18
def to_grammar(sequences, sections):
    end_state = np.max(np.hstack(sequences)) + 1
    #for now removing -1 (but deal with it later!)
    sequences = [np.append(s[s >= 0], end_state) for s in sequences]
    new_seqs = to_productions(sequences, end_state)
    trees = [Tree.fromstring(to_tree(s[1:], sections, s[0])) for s in new_seqs]
    prods = [p for t in trees for p in t.productions()]
    prods = induce_pcfg(Nonterminal('S'), prods).productions()
    grammar_string = '\n'.join([str(p) for p in prods])
    for k in set([s[0] for s in new_seqs if s[0] != 'S']):
        grammar_string = grammar_string.replace("'" + str(k) + "'", str(k))
    grammar = PCFG.fromstring(grammar_string)
    print(grammar)
    parser = InsideChartParser(grammar)
    #parser.trace(1)
    sentences = [
        Tree.fromstring(to_tree(s[:-1], sections)).leaves() for s in sequences
    ]
    parses = flatten(multiprocess('parsing', parser.parse_all, sentences), 1)
    probs = mean_probs(parses, grammar)
    print(probs)
Example #19
 def __init__(self, grammar=None, load_grammar=False):
     if grammar is None:
         if load_grammar:
             with open('./grammar.txt', 'rb') as f:
                 grammar_string = pickle.load(f)
             # drop the header line (str.lstrip() removes a character set,
             # not a prefix, so slice the header off instead)
             header = "Grammar with 7652 productions (start state = S)"
             if grammar_string.startswith(header):
                 grammar_string = grammar_string[len(header):]
             print(grammar_string)
             grammar_string = re.sub(r' \. ', " PERIOD ", grammar_string)
             grammar_string = re.sub(r' , ', " COMMA ", grammar_string)
             grammar_string = re.sub(r' -NONE- ', " NONE ", grammar_string)
             print("tried")
             self.grammar = PCFG.fromstring(grammar_string)
         else:
             pcfg_trainer = PCFGTrainer()
             self.grammar = pcfg_trainer.train()
     else:
         self.grammar = grammar
     self.unwrapped_parser = self.init_viterbi()
Example #21
def main():
    parser = argparse.ArgumentParser(description="Theory Generator.")
    parser.add_argument("--grammar",
                        required=True,
                        help="Grammar (CFG) for theory")
    parser.add_argument(
        "--config-json",
        required=True,
        help="Json format config file with parameters to generate theory",
    )
    parser.add_argument(
        "--op-theory-jsonl",
        help="Output Jsonl file containing an example json object per line. "
        "Json object has the format of the TheoryAssertionInstance class",
    )
    parser.add_argument(
        "--theorem-prover",
        choices=common.supported_theorem_provers,
        default=common.default_theorem_prover,
        help="Theorem proving engine to use. The only one supported right now is problog.",
    )
    args = parser.parse_args()

    with open(args.grammar, "r") as grammar_file, open(
        args.config_json, "r"
    ) as config_json_file, open(args.op_theory_jsonl, "w") as theory_op_file:
        config = json.load(config_json_file)
        production_strs = preprocess_pcfg(grammar_file)
        grammar_str = "\n".join(production_strs)
        grammar = PCFG.fromstring(grammar_str)
        generate_theory(
            grammar,
            config,
            theory_op_file,
            args.theorem_prover,
        )
Example #22
 def renormalize(self, height=10**4, tol=10**(-17), min_height=100):
     """Return the renormalized grammar.

     Raise ValueError if the coverage of at least one nonterminal
     equals zero.

     Input:
         height - maximal height of the parse trees over which
             coverage is computed.
         tol - tolerance used as a stopping condition: iteration
             stops once the change is smaller than tol.
         min_height - overrides the tolerance stopping condition and
             forces coverage to be computed for all heights
             <= min_height. It also determines for how many previous
             steps the change is measured, i.e. for levels
             (height-1 - min_height/2).
     """
     coverages_dict = self.list_coverages(height, tol, min_height)
     if min(coverages_dict[A] for A in coverages_dict) < tol:  # input tol
         print([A for A in coverages_dict if coverages_dict[A] < tol])
         raise ValueError("Not all coverages are positive, so"
                          " renormalization cannot be performed"
                          " (it would divide by zero).")
     def chi(prod, coverages_dict):
         """Renormalizes production probability p^~ as in Chi paper(22)."""
         subprobabs = prod.prob()
         for symbol in prod.rhs():
             if not isinstance(symbol, Nonterminal):
                 continue  # or subprobabs = 1
             else:
                 subprobabs *= coverages_dict[symbol]
         return subprobabs/coverages_dict[prod.lhs()]
     prods = [ProbabilisticProduction(prod.lhs(), prod.rhs(),
                                     prob=chi(prod, coverages_dict))
             for prod in self.grammar.productions()]
     return PCFG(self.grammar.start(), prods)
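
Read as a formula, the chi helper above rescales each production by the coverages, with c(X) the coverage of symbol X (the probability that X derives a finite tree); this is a direct reading of the code, matching what the docstring attributes to the Chi paper:

p~(A -> beta) = p(A -> beta) * (product of c(B) over nonterminals B in beta) / c(A)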
Example #23
def update_grammar(words, grammar, smoothing=None):
    # If smoothing is None, use add-one (Laplace) smoothing.
    pcount = {}
    lcount = 0
    new_prods = []
    lhs = None
    for prod in grammar.productions():
        if str(prod.lhs()) == 'NN':
            lhs = prod.lhs()
            lcount += 1
            pcount[prod] = pcount.get(prod, 0) + 1

    add = len(words) + len(pcount)
    avg = 1 / lcount

    if lhs is None:
        lhs = Nonterminal('NN')

    for word in words:
        rhs = (word.strip("'"), )
        if smoothing is None:
            prob = 1 / (lcount + add)
        else:
            prob = avg / len(words)
        prod = ProbabilisticProduction(lhs, rhs, prob=prob)
        new_prods.append(prod)

    for p in grammar.productions():
        if str(p.lhs()) == 'NN':
            if smoothing is None:
                p = ProbabilisticProduction(p.lhs(), p.rhs(), prob= (pcount[p] + 1) / (lcount + add))
            else:
                p = ProbabilisticProduction(p.lhs(), p.rhs(), prob= p.prob() - (avg / lcount))
        new_prods.append(p)

    return PCFG(grammar.start(), new_prods)
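
In the smoothing-is-None branch, the reweighting of existing productions is the usual add-one (Laplace) estimate; with count() the NN production counts and V = len(words) + len(pcount) the number of outcome types:

P(NN -> w) = (count(NN -> w) + 1) / (count(NN) + V)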
Example #24
from nltk import PCFG
toy_pcfg1 = PCFG.fromstring("""
S -> NP VP [1.0]
NP -> Det N [0.5] | NP PP [0.25] | 'John' [0.1] | 'I' [0.15]
Det -> 'the' [0.8] | 'my' [0.2]
N -> 'man' [0.5] | 'telescope' [0.5]
VP -> VP PP [0.1] | V NP [0.7] | V [0.2]
V -> 'ate' [0.35] | 'saw' [0.65]
PP -> P NP [1.0]
P -> 'with' [0.61] | 'under' [0.39]
""")
print(toy_pcfg1)
Example #25
# grammar = ''
# g1 = [i for i in file1.readlines()]
# for i in g1:
#     grammar += i
# print(grammar)

grammar = nltk.data.load('/home/omari/Dropbox/robot_modified/AR/grammar/'+g)
print(grammar.split('\n')[0])
print(grammar.split('\n')[1])
# grammar2 = grammar.split('\n')[0] +'\n'+ grammar.split('\n')[1]+'\n'

grammar2 = 'S -> a [1.0]\n'
w = 'حرك'
# quote the word so PCFG.fromstring treats it as a terminal
grammar2 += "a -> '" + w + "' [1.0]\n"
print(grammar2)
learned_pcfg = PCFG.fromstring(grammar2)
Example #26
##//////////////////////////////////////////////////////
##  Viterbi PCFG Parser
##//////////////////////////////////////////////////////

# @python_2_unicode_compatible
class ViterbiParser(ParserI):
    """
    A bottom-up ``PCFG`` parser that uses dynamic programming to find
    the single most likely parse for a text.  The ``ViterbiParser`` parser
    parses texts by filling in a "most likely constituent table".
    This table records the most probable tree representation for any
    given span and node value.  In particular, it has an entry for
    every start index, end index, and node value, recording the most
    likely subtree that spans from the start index to the end index,
    and has the given node value.
    """
def demo():
    """
    A demonstration of the probabilistic parsers.  The user is
    prompted to select which demo to run, and how many parses should
    be found; and then each parser is run on the same demo, and a
    summary of the results are displayed.
    """
    import sys, time
    from functools import reduce
    from nltk import tokenize
    from nltk.parse import ViterbiParser
    from nltk.grammar import toy_pcfg1, toy_pcfg2
    from nltk.draw.tree import draw_trees
    from nltk import Tree
    from nltk.draw.util import CanvasFrame
    from nltk.draw import TreeWidget

    # Define two demos.  Each demo has a sentence and a grammar.
    # demos = [('move the green sphere to the bottom left corner', learned_pcfg),
    #          ('move the green ball over the red block', learned_pcfg),
    #          ('take the green pyramid and put it in the top left corner', learned_pcfg),
    #           ('put the green pyramid on the red block', learned_pcfg),
    #           ('move the red cylinder and place it on top of the blue cylinder that is on top of a green cylinder', learned_pcfg),]

    # Ask the user which demo they want to use.
    # print()
    # for i in range(len(demos)):
    #     print('%3s: %s' % (i+1, demos[i][0]))
    #     print('     %r' % demos[i][1])
    #     print()
    # print('Which demo (%d-%d)? ' % (1, len(demos)), end=' ')
    # try:
    #     snum = int(sys.stdin.readline().strip())-1
    #     sent, grammar = demos[snum]
    # except:
    #     print('Bad sentence number')
    #     return

    max_scene = 300

    sc = str(max_scene).zfill(5)

    g = 'grammar_'+sc+'.txt'
    with open('/home/omari/Dropbox/robot_modified/grammar/'+g, 'r') as file1:
        grammar = file1.read()
    learned_pcfg = PCFG.fromstring(grammar)
    grammar = learned_pcfg

    with open('/home/omari/Dropbox/robot_modified/hypotheses/matched_commands.txt', 'r') as file1:
        g1 = file1.readlines()
    for line in g1:
        sent = line.split('\n')[0].split('-')[-1]
        scene = line.split('\n')[0].split('-')[0]
        sent_num = line.split('\n')[0].split('-')[1]
        print(line)
        if scene == '239' and sent_num == '0':  continue 


        # Tokenize the sentence.
        tokens = sent.split()

        parser = ViterbiParser(grammar)
        all_parses = {}

        # print('\nsent: %s\nparser: %s\ngrammar: %s' % (sent,parser,grammar))
        parser.trace(3)
        parses = parser.parse_all(tokens)
        average = (reduce(lambda a,b:a+b.prob(), parses, 0)/len(parses)
                   if parses else 0)
        num_parses = len(parses)
        for p in parses:
            all_parses[p.freeze()] = 1

        # Print some summary statistics
        # print()
        # print('Time (secs)   # Parses   Average P(parse)')
        # print('-----------------------------------------')
        # print('%11.4f%11d%19.14f' % (time, num_parses, average))
        parses = all_parses.keys()
        if parses:
            p = reduce(lambda a,b:a+b.prob(), parses, 0)/len(parses)
        else: p = 0
        # print('------------------------------------------')
        # print('%11s%11d%19.14f' % ('n/a', len(parses), p))

        # Ask the user if we should draw the parses.
        # print()
        # print('Draw parses (y/n)? ', end=' ')
        # if sys.stdin.readline().strip().lower().startswith('y'):

        #     print('  please wait...')
        # draw_trees(*parses)

        cf = CanvasFrame()
        # t = Tree(parses)
        t = Tree.fromstring('(S  (CH_POS_PREPOST move)  (PRE_POST    (PRE      (the the)      (_entity (F_HSV green) (F_SHAPE sphere)))    (PREPOST_connect (to to) (the the))    (POST      (_F_POS (F_POS (_bottom_left (bottom bottom) (left left)))) (corner corner))))')

        tc = TreeWidget(cf.canvas(), t, draggable=1,
                        node_font=('helvetica', -14),
                        leaf_font=('helvetica', -12),
                        roof_fill='white', roof_color='black',
                        leaf_color='green4', node_color='blue4')
        cf.add_widget(tc,10,10)

        # tc = TreeWidget(cf.canvas(),t)
        # cf.add_widget(tc,10,10) # (10,10) offsets
        cf.print_to_file('/home/omari/Dropbox/robot_modified/trees/scene-'+scene+'-'+sent_num+'.ps')
        cf.destroy()
Example #27
EPOCHS = 30

#############################################################
# grammars and coding

# each name_grammar is followed by:
# name_code_for = the codes for the terminals, and the fill code (last)
# name_predict = a list of the terminals to predict in a string
# name_sample_depth = max depth of derivations to produce sample

# Dyck language on two kinds of parentheses
# terminals to predict: ), ]
parentheses_grammar = PCFG.fromstring("""
S -> S S [0.20]
S -> '(' S ')' [0.20] | '(' ')' [0.20]
S -> '[' S ']' [0.20] | '[' ']' [0.20]
""")
parentheses_code_for = {u'(': 0, u')': 1, u'[': 2, u']': 3, '#': 4}
parentheses_predict = [u')', u']']
parentheses_sample_depth = 5
# depth = 5 yields 15,130 strings

# center-marked "palindromes" (using primed symbols to predict)
# terminals to predict: a1, b1
reverse_grammar = PCFG.fromstring("""
S -> "a" S "a1" [0.48]
S -> "b" S "b1" [0.48]
S -> "c" [0.04]
""")
reverse_code_for = {u"a": 0, u"b": 1, u"a1": 2, u"b1": 3, u"c": 4, u"#": 5}
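
As a minimal sketch (not part of the original source) of drawing one random string from either grammar above, expanding nonterminals according to the rule probabilities up to a depth budget:

import random

from nltk.grammar import Nonterminal

def sample(grammar, symbol=None, depth=5):
    """Randomly expand symbol by rule probability; None when out of depth."""
    if symbol is None:
        symbol = grammar.start()
    if not isinstance(symbol, Nonterminal):
        return [symbol]  # terminals are emitted as-is
    if depth == 0:
        return None  # depth budget exhausted before termination
    productions = grammar.productions(lhs=symbol)
    production = random.choices(productions, weights=[p.prob() for p in productions])[0]
    result = []
    for sym in production.rhs():
        sub = sample(grammar, sym, depth - 1)
        if sub is None:
            return None
        result.extend(sub)
    return result

# e.g. tokens = sample(parentheses_grammar); may be None if the budget ran out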
Example #28
    def get_full_expr(self):
        return self.full_expr(*self.params)

    def __str__(self):
        return str(self.expr)

    def __repr__(self):
        return str(self.expr)


if __name__ == '__main__':
    print("--- model.py test ---")
    from nltk import PCFG
    grammar_str = "S -> 'c' '*' 'x' [1.0]"
    grammar = PCFG.fromstring(grammar_str)
    parse_tree_code = "0"
    expression_str = "c*x"
    probability = 1.0
    symbols_params = ["c"]
    symbols_variables = ["x"]

    print("Create the model instance and print the model.")
    model = Model(expr=expression_str,
                  grammar=grammar,
                  code=parse_tree_code,
                  p=probability,
                  sym_params=symbols_params,
                  sym_vars=symbols_variables)
    print(model)
    assert str(model) == expression_str
Example #29
def main(args):
    logging.info("Reading and preparing grammar from file")
    # Read and prepare the grammar from file
    with open(args.grammar, 'r') as f:
        pcfg_string = f.read()
    induced_grammar = PCFG.fromstring(pcfg_string)
    induced_grammar._start = Nonterminal('TOP')

    logging.info("Reading and preparing induction and evaluation messages")
    # Read and prepare induction/evaluation messages
    induction_messages = load_messages(args.induct)
    evaluation_messages = load_messages(args.eval)

    # Get some metrics
    logging.info("Providing grammar related statistics")
    ## Grammar
    grammar_results = analyse_grammar(induced_grammar)

    logging.info("Providing word class statistics")
    ## Word classes
    preterminals, terminals = get_stat_dicts(induced_grammar)
    word_class_results = {
        'avg terminals/preterminal': calculate_average(preterminals),
        'avg preterminals/terminal': calculate_average(terminals),
    }

    ## Parses
    logging.info("Providing Viterbi parse related statistics")
    induct_viterbi_results = analyse_viterbi(induced_grammar,
                                             induction_messages)
    eval_viterbi_results = analyse_viterbi(induced_grammar,
                                           evaluation_messages)

    # Write results to file at args.output

    cols = [
        'name', 'parser', 'type', 'date+timestamp', 'induct_fp', 'eval_fp',
        'full_fp'
    ]
    vals = [
        args.name, args.parser, args.type,
        datetime.now(), args.induct, args.eval, args.full
    ]

    ## Add grammar metrics
    grammar_metrics = ['log2prior', 'terminals', 'preterminals', 'recursive']
    #grammar_metrics = ['GDL', 'terminals', 'preterminals', 'recursive']
    cols += grammar_metrics
    for m in grammar_metrics:
        vals.append(grammar_results[m])

    ## Add word class metrics
    word_class_metrics = [
        'avg terminals/preterminal', 'avg preterminals/terminal'
    ]
    cols += word_class_metrics
    for m in word_class_metrics:
        vals.append(word_class_results[m])

    ## Add parse metrics
    parse_metrics = ['average_log2likelihood', 'coverage']
    #parse_metrics = ['average_DDL', 'coverage']
    for m in parse_metrics:
        cols.append('induct_' + m)
        vals.append(induct_viterbi_results[m])
        cols.append('eval_' + m)
        vals.append(eval_viterbi_results[m])
    logging.debug(str(eval_viterbi_results['average_log2likelihood']))

    ## Add overgeneration coverage if -L and --overgeneration is set
    overgeneration_metrics = [
        'overgeneration_coverage', 'overgeneration_coverage_N'
    ]
    cols += overgeneration_metrics
    if (args.overgeneration > 0) and args.L:
        logging.info("Estimating overgeneration coverage")
        vals.append(
            overgeneration_coverage(induced_grammar, args.L,
                                    args.overgeneration))
    else:
        logging.info("Skipping estimation of overgeneration coverage")
        vals.append('NaN')
    vals.append(args.overgeneration)

    ## Add preterminal group metrics
    logging.info("Calculating preterminal group metrics")
    preterminals, terminals = get_stat_dicts(induced_grammar)
    nominals, groups, nominals_count, groups_count = get_stats_wordclass_groups(
        induced_grammar, preterminals, terminals)
    preterminalgroup_metrics = [
        'number of nominals', 'number of pre-terminal groups',
        'average number of pre-terminal groups generated by nominal'
    ]
    cols += preterminalgroup_metrics
    vals.append(len(nominals))
    vals.append(len(groups))
    vals.append(nominals_count)

    ## To csv file
    if not os.path.exists(args.output):
        with open(args.output, 'w') as f:
            writer = csv.writer(f)
            writer.writerows([cols] + [vals])
    else:
        with open(args.output, 'a') as f:
            writer = csv.writer(f)
            writer.writerows([vals])
    logging.info("Finished providing metrics for induced grammar")
Example #30
from random import choices
from random import sample

from nltk import CFG, PCFG

MT = 1
N_SEN = 50
grammar_path = "../grammars/grammar_rules_2.txt"

demo_pcfg = PCFG.fromstring("""
    S -> VP          [0.25]
    S -> NP VP       [0.75]
    
    VP -> V          [0.50]
    VP -> V NP       [0.50]
    
    NP -> D N        [0.40]
    NP -> N          [0.60]

    V -> 'read'      [0.75]
    V -> 'study'     [0.25]
    
    D -> 'the'       [1.00]
    
    N -> 'children'  [0.60]
    N -> 'books'     [0.40]
""")

demo_cfg = CFG.fromstring("""
    S -> VP
    S -> NP VP

    VP -> V
    VP -> V NP
""")
Example #31
grammar_old = PCFG.fromstring("""
    S -> NT38 NT73 [0.00218381]
    NT38 -> NT13 UnaryNT12 [1.0]
    NT13 -> UnaryNT11 Digit [0.983932]
    UnaryNT11 -> '-' [1.0]
    Digit -> '7' [0.0909933]
    UnaryNT12 -> '+' [1.0]
    NT73 -> NT67 UnaryNT3 [1.0]
    NT67 -> NT16 UnaryNT4 [1.0]
    NT16 -> UnaryNT1 NT13 [1.0]
    UnaryNT1 -> '(' [0.994503]
    Digit -> '3' [0.086164]
    UnaryNT4 -> 'x' [1.0]
    UnaryNT3 -> ')' [0.982952]
    S -> '8' [0.00959646]
    S -> NT13 UnaryNT4 [0.0246371]
    NT13 -> NT13 NT13 [0.0160676]
    S -> NT94 NT28 [0.00144562]
    NT94 -> NT38 NT73 [1.0]
    NT28 -> UnaryNT11 NT18 [1.0]
    NT18 -> NT16 UnaryNT3 [0.998724]
    S -> Digit NT28 [0.00753568]
    Digit -> '8' [0.082603]
    S -> NT21 NT15 [0.00498278]
    NT21 -> NT13 UnaryNT4 [1.0]
    NT15 -> UnaryNT12 Digit [0.988942]
    S -> Digit NT15 [0.0125492]
    S -> Digit Digit [0.0239296]
    Digit -> '1' [0.147157]
    Digit -> '5' [0.0848794]
    S -> NT21 NT17 [0.013718]
    NT17 -> UnaryNT5 NT13 [1.0]
    UnaryNT5 -> '/' [1.0]
    S -> Digit NT17 [0.0202079]
    Digit -> Digit Digit [0.0933999]
    S -> 'x' [0.0183317]
    S -> NT25 NT15 [0.00273745]
    NT25 -> Digit UnaryNT4 [1.0]
    S -> '2' [0.0119033]
    S -> NT25 NT39 [0.00295276]
    NT39 -> NT15 NT13 [1.0]
    S -> Digit NT13 [0.0211922]
    Digit -> '2' [0.0964894]
    S -> UnaryNT1 NT383 [0.000184547]
    NT383 -> NT110 NT79 [1.0]
    NT110 -> NT25 NT39 [1.0]
    NT79 -> UnaryNT3 NT14 [1.0]
    NT14 -> UnaryNT5 Digit [1.0]
    S -> NT140 NT79 [0.000984252]
    NT140 -> UnaryNT1 NT37 [1.0]
    NT37 -> Digit NT13 [1.0]
    S -> Digit NT20 [0.0135335]
    NT20 -> UnaryNT4 NT14 [1.0]
    S -> NT13 NT14 [0.0228223]
    S -> UnaryNT11 Digit [0.120079]
    Digit -> '6' [0.0902779]
    S -> NT13 NT28 [0.00827387]
    S -> NT13 NT17 [0.0210384]
    S -> Digit NT14 [0.0228223]
    S -> NT33 UnaryNT9 [0.00193775]
    NT33 -> Digit NT15 [1.0]
    UnaryNT9 -> 'y' [1.0]
    S -> NT81 NT13 [0.00286048]
    NT81 -> NT33 UnaryNT9 [1.0]
    S -> '1' [0.0111959]
    S -> Digit UnaryNT10 [0.000369094]
    UnaryNT10 -> 'Y' [1.0]
    S -> NT266 NT14 [0.0000615157]
    NT266 -> Digit UnaryNT10 [1.0]
    S -> 'Y' [0.000369094]
    Digit -> '9' [0.0766679]
    S -> NT22 NT15 [0.00279897]
    NT22 -> Digit UnaryNT9 [1.0]
    S -> NT106 NT28 [0.000153789]
    NT106 -> NT22 NT15 [1.0]
    S -> NT13 NT15 [0.0159941]
    S -> NT13 NT13 [0.0185162]
    S -> NT22 NT39 [0.00224532]
    Digit -> '4' [0.0913511]
    S -> Digit UnaryNT9 [0.0255598]
    S -> NT22 NT14 [0.0130721]
    S -> 'y' [0.0184855]
    S -> NT38 NT20 [0.00193775]
    S -> NT13 NT23 [0.00719734]
    NT23 -> UnaryNT7 Digit [0.956388]
    UnaryNT7 -> '*' [1.0]
    S -> NT274 NT86 [0.000123031]
    NT274 -> NT172 NT20 [1.0]
    NT172 -> NT16 UnaryNT12 [1.0]
    NT86 -> UnaryNT3 NT23 [1.0]
    S -> NT341 NT14 [0.000153789]
    NT341 -> NT274 NT86 [1.0]
    S -> NT89 NT28 [0.00101501]
    NT89 -> NT38 NT20 [1.0]
    S -> '5' [0.00990404]
    S -> UnaryNT4 NT14 [0.00925812]
    S -> Digit NT23 [0.00916585]
    S -> NT20 NT23 [0.00888903]
    S -> NT40 NT92 [0.000922736]
    NT40 -> Digit UnaryNT12 [1.0]
    NT92 -> UnaryNT1 NT90 [1.0]
    NT90 -> NT35 UnaryNT3 [1.0]
    NT35 -> UnaryNT9 NT17 [1.0]
    S -> NT117 NT13 [0.00107653]
    NT117 -> NT40 NT92 [1.0]
    S -> UnaryNT9 NT17 [0.00639764]
    S -> UnaryNT11 NT26 [0.00375246]
    NT26 -> UnaryNT9 NT14 [1.0]
    S -> NT54 NT23 [0.000338337]
    NT54 -> UnaryNT11 NT26 [1.0]
    S -> NT19 NT14 [0.000984252]
    NT19 -> NT13 UnaryNT9 [1.0]
    S -> NT152 NT23 [0.000399852]
    NT152 -> NT19 NT14 [1.0]
    S -> NT13 UnaryNT9 [0.0281435]
    S -> NT19 NT17 [0.0145177]
    S -> '9' [0.00888903]
    S -> NT20 NT29 [0.00147638]
    NT29 -> UnaryNT12 NT18 [0.985335]
    S -> NT20 NT77 [0.00104577]
    NT77 -> NT29 NT15 [1.0]
    S -> '6' [0.00882751]
    S -> NT40 NT65 [0.00190699]
    NT65 -> NT63 UnaryNT3 [1.0]
    NT63 -> UnaryNT1 NT32 [1.0]
    NT32 -> UnaryNT4 NT17 [1.0]
    S -> NT100 NT13 [0.00172244]
    NT100 -> NT40 NT65 [1.0]
    S -> '0' [0.0135335]
    S -> UnaryNT4 NT17 [0.00596703]
    S -> Digit NT31 [0.00565945]
    Digit -> '0' [0.0600172]
    NT31 -> UnaryNT7 NT18 [0.996815]
    S -> NT32 NT31 [0.00529035]
    S -> '7' [0.009781]
    S -> NT38 NT65 [0.00144562]
    S -> NT99 NT15 [0.00113804]
    NT99 -> NT38 NT65 [1.0]
    S -> '3' [0.0100271]
    S -> Digit NT30 [0.00756644]
    NT30 -> UnaryNT5 UnaryNT9 [1.0]
    S -> NT46 NT47 [0.00719734]
    NT46 -> Digit NT30 [1.0]
    NT47 -> UnaryNT7 UnaryNT9 [1.0]
    S -> NT13 NT30 [0.00778174]
    S -> NT45 NT47 [0.00747416]
    NT45 -> NT13 NT30 [1.0]
    S -> NT13 NT27 [0.00725886]
    NT27 -> UnaryNT5 UnaryNT4 [1.0]
    S -> NT44 NT52 [0.00756644]
    NT44 -> NT13 NT27 [1.0]
    NT52 -> UnaryNT7 UnaryNT4 [1.0]
    S -> Digit NT27 [0.00753568]
    S -> NT43 NT52 [0.00501353]
    NT43 -> Digit NT27 [1.0]
    S -> Digit UnaryNT4 [0.0233145]
    S -> NT19 NT15 [0.00556718]
    S -> NT19 NT39 [0.00322958]
    S -> NT16 NT215 [0.0000922736]
    NT215 -> NT15 NT79 [1.0]
    S -> NT55 NT215 [0.000123031]
    NT55 -> NT16 UnaryNT9 [1.0]
    S -> NT16 NT327 [0.000123031]
    NT327 -> NT215 NT13 [1.0]
    S -> NT55 NT327 [0.000153789]
    S -> NT24 NT13 [0.00043061]
    NT24 -> NT13 NT15 [1.0]
    S -> NT19 NT23 [0.000215305]
    S -> NT284 NT86 [0.0000922736]
    NT284 -> NT16 NT15 [1.0]
    S -> NT268 NT86 [0.000123031]
    NT268 -> NT55 NT15 [1.0]
    S -> NT56 NT15 [0.000215305]
    NT56 -> Digit NT23 [1.0]
    S -> NT374 NT15 [0.0000922736]
    NT374 -> NT268 NT86 [1.0]
    S -> NT238 NT15 [0.000369094]
    NT238 -> NT235 NT86 [1.0]
    NT235 -> UnaryNT1 NT179 [1.0]
    NT179 -> NT88 NT15 [1.0]
    NT88 -> UnaryNT11 UnaryNT9 [1.0]
    S -> NT238 NT39 [0.000123031]
    S -> NT235 NT86 [0.0000922736]
    S -> NT263 NT233 [0.0000922736]
    NT263 -> NT88 NT23 [1.0]
    NT233 -> NT15 NT23 [1.0]
    S -> NT263 NT15 [0.000123031]
    S -> NT263 NT39 [0.000123031]
    S -> NT88 NT23 [0.000123031]
    S -> NT88 NT194 [0.000215305]
    NT194 -> NT23 NT14 [1.0]
    S -> NT280 NT23 [0.000184547]
    NT280 -> NT88 NT194 [1.0]
    S -> NT13 NT647 [0.0000307579]
    NT647 -> NT226 NT28 [1.0]
    NT226 -> UnaryNT5 UnaryNT1 [1.0]
    UnaryNT3 -> UnaryNT3 UnaryNT3 [0.0170476]
    S -> NT19 NT647 [0.0000307579]
    S -> NT140 NT116 [0.0000922736]
    NT116 -> UnaryNT3 NT17 [1.0]
    S -> NT55 NT421 [0.000123031]
    NT421 -> NT39 NT116 [1.0]
    S -> NT41 NT211 [0.000153789]
    NT41 -> Digit NT17 [1.0]
    NT211 -> NT161 NT181 [1.0]
    NT161 -> UnaryNT11 UnaryNT1 [1.0]
    NT181 -> NT41 UnaryNT3 [1.0]
    S -> NT48 NT320 [0.000276821]
    NT48 -> NT19 NT17 [1.0]
    NT320 -> NT303 NT312 [1.0]
    NT303 -> UnaryNT12 UnaryNT1 [1.0]
    NT312 -> NT181 NT211 [1.0]
    S -> NT41 NT28 [0.0000922736]
    S -> UnaryNT9 NT320 [0.0000307579]
    S -> NT736 NT211 [0.0000307579]
    NT736 -> UnaryNT9 NT29 [1.0]
    S -> UnaryNT9 NT594 [0.0000615157]
    NT594 -> NT13 NT211 [1.0]
    S -> UnaryNT9 NT61 [0.000123031]
    NT61 -> NT13 NT28 [1.0]
    S -> UnaryNT9 NT24 [0.000153789]
    S -> NT530 NT79 [0.0000922736]
    NT530 -> UnaryNT1 NT123 [1.0]
    NT123 -> NT22 NT39 [1.0]
    S -> NT34 NT36 [0.000184547]
    NT34 -> Digit NT14 [1.0]
    NT36 -> NT13 NT14 [1.0]
    S -> NT51 NT397 [0.0000615157]
    NT51 -> NT22 NT14 [1.0]
    NT397 -> NT170 NT36 [1.0]
    NT170 -> NT15 NT14 [1.0]
    S -> UnaryNT9 NT397 [0.000123031]
    S -> NT40 NT57 [0.00230684]
    NT57 -> NT55 UnaryNT3 [1.0]
    S -> Digit NT19 [0.00295276]
    S -> NT87 NT13 [0.00163017]
    NT87 -> Digit NT19 [1.0]
    S -> NT21 NT39 [0.0037217]
    S -> NT35 NT29 [0.00126107]
    S -> NT35 NT77 [0.000276821]
    S -> NT35 NT31 [0.00470595]
    S -> NT13 NT31 [0.00676673]
    S -> NT20 NT15 [0.000891978]
    S -> NT20 NT39 [0.00113804]
    S -> NT35 NT15 [0.00212229]
    S -> NT35 NT39 [0.00258366]
    S -> NT54 NT39 [0.000891978]
    S -> Digit NT62 [0.00178396]
    NT62 -> NT58 NT59 [1.0]
    NT58 -> UnaryNT7 NT16 [1.0]
    NT59 -> NT14 UnaryNT3 [1.0]
    S -> NT54 NT62 [0.00227608]
    S -> NT253 NT14 [0.0000922736]
    NT253 -> NT54 NT31 [1.0]
    S -> UnaryNT1 NT347 [0.000246063]
    NT347 -> NT51 NT79 [1.0]
    S -> UnaryNT9 NT500 [0.0000922736]
    NT500 -> NT31 NT17 [1.0]
    S -> UnaryNT10 NT17 [0.000123031]
    S -> NT308 NT31 [0.000153789]
    NT308 -> UnaryNT10 NT17 [1.0]
    S -> UnaryNT8 NT14 [0.0000615157]
    UnaryNT8 -> 'X' [1.0]
    S -> NT559 NT23 [0.0000307579]
    NT559 -> UnaryNT8 NT14 [1.0]
    S -> 'X' [0.0000922736]
    S -> NT38 NT92 [0.00104577]
    S -> NT133 NT28 [0.000461368]
    NT133 -> NT38 NT92 [1.0]
    S -> '4' [0.0123647]
    S -> NT24 UnaryNT9 [0.00289124]
    S -> NT78 NT28 [0.000645915]
    NT78 -> NT24 UnaryNT9 [1.0]
    S -> NT22 NT29 [0.00316806]
    S -> NT22 NT66 [0.00132259]
    NT66 -> NT29 NT28 [1.0]
    S -> NT24 UnaryNT4 [0.00261442]
    S -> NT80 NT28 [0.00104577]
    NT80 -> NT24 UnaryNT4 [1.0]
    S -> NT80 NT15 [0.00199926]
    S -> NT25 NT24 [0.00144562]
    S -> NT95 NT13 [0.00129183]
    NT95 -> NT40 NT57 [1.0]
    S -> NT13 NT160 [0.000830463]
    NT160 -> UnaryNT6 Digit [1.0]
    UnaryNT6 -> '.' [1.0]
    S -> NT21 NT29 [0.00193775]
    S -> NT21 NT66 [0.00153789]
    S -> NT32 NT15 [0.00150714]
    S -> NT32 NT39 [0.00119956]
    S -> NT38 NT26 [0.00119956]
    S -> NT111 NT28 [0.000830463]
    NT111 -> NT38 NT26 [1.0]
    S -> UnaryNT9 NT14 [0.00575172]
    S -> NT26 NT23 [0.00599779]
    S -> NT26 NT15 [0.00113804]
    S -> NT26 NT39 [0.00184547]
    S -> NT44 NT23 [0.000153789]
    S -> NT44 NT185 [0.00043061]
    NT185 -> NT58 NT182 [1.0]
    NT182 -> NT27 UnaryNT3 [1.0]
    S -> NT13 NT185 [0.0005844]
    S -> Digit NT139 [0.000738189]
    NT139 -> UnaryNT7 NT121 [1.0]
    NT121 -> NT115 Digit [1.0]
    NT115 -> NT104 UnaryNT2 [1.0]
    NT104 -> NT101 UnaryNT3 [1.0]
    NT101 -> UnaryNT1 NT43 [1.0]
    UnaryNT2 -> '^' [1.0]
    S -> NT33 UnaryNT4 [0.00341412]
    S -> NT64 NT13 [0.00439838]
    NT64 -> NT33 UnaryNT4 [1.0]
    S -> NT78 NT15 [0.00159941]
    S -> Digit NT160 [0.000553642]
    S -> NT89 NT15 [0.00159941]
    S -> UnaryNT11 UnaryNT9 [0.00242987]
    S -> UnaryNT11 NT35 [0.00123031]
    S -> NT38 NT57 [0.00215305]
    S -> NT96 NT15 [0.000676673]
    NT96 -> NT38 NT57 [1.0]
    S -> NT26 NT29 [0.00135335]
    S -> NT26 NT77 [0.000830463]
    S -> NT32 NT29 [0.00153789]
    S -> NT32 NT77 [0.000707431]
    S -> NT133 NT15 [0.000522884]
    S -> NT40 NT73 [0.00156865]
    S -> NT122 NT13 [0.000738189]
    NT122 -> NT40 NT73 [1.0]
    S -> NT18 NT17 [0.000246063]
    S -> UnaryNT1 NT683 [0.0000307579]
    NT683 -> NT207 NT116 [1.0]
    NT207 -> NT122 NT13 [1.0]
    S -> NT41 NT493 [0.0000922736]
    NT493 -> NT465 NT211 [1.0]
    NT465 -> UnaryNT12 NT50 [1.0]
    NT50 -> NT21 NT17 [1.0]
    S -> NT36 NT493 [0.0000307579]
    S -> NT295 NT211 [0.0000615157]
    NT295 -> NT36 NT227 [1.0]
    NT227 -> UnaryNT12 UnaryNT4 [1.0]
    S -> NT28 NT14 [0.000123031]
    S -> NT295 NT332 [0.0000922736]
    NT332 -> UnaryNT11 NT220 [1.0]
    NT220 -> NT16 NT59 [1.0]
    S -> NT210 NT31 [0.000369094]
    NT210 -> NT28 NT14 [1.0]
    S -> NT319 NT390 [0.000153789]
    NT319 -> NT16 NT309 [1.0]
    NT309 -> NT14 NT227 [1.0]
    NT390 -> NT332 NT31 [1.0]
    S -> NT276 NT23 [0.0000307579]
    NT276 -> NT210 NT31 [1.0]
    S -> NT398 NT23 [0.0000307579]
    NT398 -> NT319 NT390 [1.0]
    S -> NT481 NT31 [0.000123031]
    NT481 -> NT464 NT59 [1.0]
    NT464 -> NT319 NT15 [1.0]
    S -> UnaryNT4 NT31 [0.000461368]
    S -> NT34 NT31 [0.000184547]
    S -> UnaryNT11 UnaryNT4 [0.0025529]
    S -> NT220 NT17 [0.000307579]
    S -> UnaryNT11 NT32 [0.00206078]
    S -> NT25 NT29 [0.00184547]
    S -> NT13 NT29 [0.000492126]
    NT29 -> NT29 NT29 [0.0146654]
    S -> NT25 NT13 [0.00203002]
    S -> NT409 NT165 [0.0000922736]
    NT409 -> NT63 NT15 [1.0]
    NT165 -> UnaryNT3 NT31 [1.0]
    S -> NT208 NT165 [0.000184547]
    NT208 -> UnaryNT1 NT33 [1.0]
    S -> NT566 NT165 [0.0000615157]
    NT566 -> NT144 NT24 [1.0]
    NT144 -> UnaryNT1 NT53 [1.0]
    NT53 -> UnaryNT11 NT20 [1.0]
    NT18 -> NT18 NT18 [0.00127592]
    S -> NT566 NT345 [0.0000307579]
    NT345 -> NT165 NT17 [1.0]
    NT31 -> NT31 NT31 [0.00318471]
    S -> NT208 NT345 [0.0000615157]
    S -> NT438 NT305 [0.000123031]
    NT438 -> NT402 NT71 [1.0]
    NT402 -> NT144 NT23 [1.0]
    NT23 -> NT23 NT23 [0.0436118]
    NT71 -> NT13 NT23 [0.9]
    NT305 -> NT233 NT116 [1.0]
    S -> NT311 NT305 [0.000184547]
    NT311 -> UnaryNT1 NT56 [1.0]
    S -> NT653 NT651 [0.0000307579]
    NT653 -> NT402 NT24 [1.0]
    NT651 -> NT23 NT116 [1.0]
    S -> NT653 NT116 [0.0000307579]
    S -> NT16 NT417 [0.000153789]
    NT417 -> NT199 NT116 [1.0]
    NT199 -> NT20 NT24 [1.0]
    S -> NT208 NT651 [0.0000307579]
    S -> NT208 NT116 [0.0000615157]
    S -> NT161 NT413 [0.000153789]
    NT413 -> NT33 NT79 [1.0]
    S -> NT519 NT79 [0.0000922736]
    NT519 -> UnaryNT11 NT377 [1.0]
    NT377 -> NT67 NT24 [1.0]
    S -> NT513 NT14 [0.0000615157]
    NT513 -> UnaryNT11 NT73 [1.0]
    S -> UnaryNT1 NT672 [0.0000307579]
    NT672 -> NT513 NT541 [1.0]
    NT541 -> NT59 NT14 [1.0]
    S -> NT220 NT14 [0.000123031]
    S -> NT291 NT79 [0.000338337]
    NT291 -> UnaryNT1 NT49 [1.0]
    NT49 -> Digit NT20 [1.0]
    S -> NT88 NT31 [0.000246063]
    S -> NT212 NT79 [0.000307579]
    NT212 -> UnaryNT1 NT34 [1.0]
    S -> NT34 NT23 [0.000707431]
    S -> NT22 NT13 [0.00249139]
    S -> NT22 NT24 [0.00227608]
    S -> Digit NT29 [0.000307579]
    S -> NT78 NT29 [0.000153789]
    S -> Digit NT77 [0.000184547]
    S -> NT78 NT77 [0.0000922736]
    S -> Digit NT24 [0.000123031]
    S -> NT78 NT24 [0.000123031]
    S -> NT19 NT29 [0.00316806]
    S -> NT19 NT13 [0.00224532]
    S -> NT19 NT24 [0.00212229]
    S -> Digit NT21 [0.00178396]
    S -> Digit NT83 [0.000492126]
    NT83 -> NT21 NT15 [1.0]
    S -> Digit NT98 [0.000276821]
    NT98 -> NT21 NT39 [1.0]
    S -> NT114 NT13 [0.000984252]
    NT114 -> Digit NT21 [1.0]
    S -> NT19 NT66 [0.000768947]
    S -> NT20 NT66 [0.000922736]
    S -> NT20 NT24 [0.000707431]
    S -> NT35 NT66 [0.000922736]
    S -> NT54 NT24 [0.000615157]
    S -> NT40 NT20 [0.00203002]
    S -> NT97 NT13 [0.0019685]
    NT97 -> NT40 NT20 [1.0]
    S -> NT99 NT28 [0.00113804]
    S -> NT19 NT30 [0.000799705]
    S -> NT22 NT30 [0.000369094]
    S -> NT96 NT28 [0.000830463]
    S -> Digit NT460 [0.000123031]
    NT460 -> NT102 NT13 [1.0]
    NT102 -> UnaryNT11 UnaryNT4 [1.0]
    S -> NT40 NT26 [0.00110728]
    S -> NT112 NT13 [0.00150714]
    NT112 -> NT40 NT26 [1.0]
    S -> NT26 NT66 [0.000522884]
    S -> NT26 NT24 [0.00043061]
    S -> NT111 NT15 [0.00110728]
    S -> NT162 NT15 [0.000707431]
    NT162 -> NT13 NT53 [1.0]
    S -> UnaryNT11 NT20 [0.00261442]
    S -> NT53 NT62 [0.00184547]
    S -> NT13 NT62 [0.00252215]
    S -> NT45 NT31 [0.000184547]
    S -> NT55 NT349 [0.000246063]
    NT349 -> NT30 NT116 [1.0]
    S -> NT25 NT27 [0.000123031]
    S -> NT21 NT27 [0.000123031]
    S -> NT19 NT77 [0.0011688]
    S -> NT492 NT664 [0.0000307579]
    NT492 -> NT172 NT316 [1.0]
    NT316 -> NT57 NT28 [1.0]
    NT664 -> UnaryNT5 NT57 [1.0]
    S -> NT324 NT664 [0.0000307579]
    NT324 -> NT16 NT28 [1.0]
    S -> NT492 NT395 [0.0000307579]
    NT395 -> UnaryNT5 NT316 [1.0]
    S -> NT324 NT395 [0.0000307579]
    S -> NT508 NT29 [0.0000307579]
    NT508 -> NT492 NT395 [1.0]
    S -> NT523 NT29 [0.0000307579]
    NT523 -> NT324 NT395 [1.0]
    S -> NT508 NT432 [0.0000307579]
    UnaryNT1 -> UnaryNT1 UnaryNT1 [0.00549659]
    NT432 -> NT29 NT17 [1.0]
    S -> NT523 NT432 [0.0000307579]
    S -> NT21 NT77 [0.000522884]
    S -> UnaryNT1 NT721 [0.0000307579]
    NT721 -> NT197 NT86 [1.0]
    NT197 -> NT26 NT77 [1.0]
    S -> NT729 NT690 [0.0000307579]
    NT729 -> NT76 UnaryNT12 [1.0]
    NT76 -> NT26 NT23 [1.0]
    NT690 -> NT478 NT233 [1.0]
    NT478 -> NT471 UnaryNT3 [1.0]
    NT471 -> NT16 NT23 [1.0]
    S -> NT96 NT13 [0.000215305]
    S -> NT284 NT116 [0.0000307579]
    S -> NT268 NT116 [0.0000922736]
    S -> UnaryNT11 NT399 [0.000123031]
    NT399 -> NT284 UnaryNT3 [1.0]
    S -> UnaryNT11 NT452 [0.000123031]
    NT452 -> NT268 UnaryNT3 [1.0]
    S -> UnaryNT11 NT18 [0.0000307579]
    S -> NT293 UnaryNT3 [0.000215305]
    NT293 -> NT161 NT179 [1.0]
    S -> NT293 NT116 [0.000123031]
    S -> NT88 NT15 [0.000123031]
    S -> NT88 NT39 [0.0000615157]
    S -> NT22 NT77 [0.000553642]
    S -> NT38 NT183 [0.000338337]
    NT183 -> NT144 UnaryNT3 [1.0]
    S -> NT275 NT15 [0.0000615157]
    NT275 -> NT38 NT183 [1.0]
    S -> NT53 NT23 [0.000615157]
    S -> NT13 NT20 [0.000707431]
    S -> NT78 NT13 [0.000522884]
    S -> NT78 NT61 [0.0000307579]
    S -> NT37 NT28 [0.0000615157]
    S -> NT119 NT29 [0.0000307579]
    NT119 -> NT22 NT13 [1.0]
    S -> NT119 NT77 [0.0000307579]
    S -> NT108 NT13 [0.0000615157]
    NT108 -> NT19 NT29 [1.0]
    S -> NT124 NT29 [0.0000615157]
    NT124 -> NT19 NT13 [1.0]
    S -> NT96 NT29 [0.0000922736]
    S -> NT13 NT77 [0.0000922736]
    S -> NT103 NT13 [0.0000615157]
    NT103 -> NT22 NT29 [1.0]
    S -> NT125 NT13 [0.000123031]
    NT125 -> NT25 NT29 [1.0]
    S -> NT53 NT31 [0.000338337]
    S -> NT99 NT13 [0.0000307579]
    S -> NT172 NT581 [0.0000615157]
    NT581 -> NT73 NT17 [1.0]
    S -> NT94 NT15 [0.000399852]
    S -> NT514 NT79 [0.0000307579]
    NT514 -> UnaryNT1 NT26 [1.0]
    S -> NT574 NT23 [0.0000615157]
    NT574 -> NT514 NT79 [1.0]
    S -> NT51 NT23 [0.0000922736]
    S -> NT148 NT28 [0.0000615157]
    NT148 -> NT32 NT15 [1.0]
    S -> NT409 NT345 [0.0000615157]
    S -> NT679 NT345 [0.0000307579]
    NT679 -> NT144 NT15 [1.0]
    S -> NT53 NT15 [0.000276821]
    S -> NT53 NT39 [0.000399852]
    S -> NT445 NT79 [0.0000307579]
    NT445 -> NT16 NT20 [1.0]
    S -> NT67 NT446 [0.000123031]
    NT446 -> NT77 NT116 [1.0]
    S -> NT377 NT116 [0.000123031]
    S -> NT25 NT66 [0.000738189]
    S -> NT324 NT23 [0.0000922736]
    S -> NT71 NT520 [0.0000307579]
    NT520 -> UnaryNT11 NT478 [1.0]
    S -> NT13 NT520 [0.0000615157]
    S -> NT13 NT157 [0.00086122]
    NT157 -> NT54 NT15 [1.0]
    S -> NT21 NT13 [0.00178396]
    S -> NT21 NT24 [0.00209154]
    S -> NT67 NT421 [0.0000307579]
    S -> NT322 NT86 [0.000276821]
    NT322 -> UnaryNT1 NT158 [1.0]
    NT158 -> NT26 NT15 [1.0]
    S -> NT76 NT233 [0.0000922736]
    S -> NT76 NT15 [0.0000922736]
    S -> UnaryNT9 NT15 [0.000123031]
    S -> UnaryNT9 NT39 [0.0000922736]
    S -> NT54 NT15 [0.0005844]
    S -> NT54 NT31 [0.000399852]
    S -> NT648 NT79 [0.0000307579]
    NT648 -> UnaryNT1 NT20 [1.0]
    S -> UnaryNT4 NT754 [0.0000307579]
    NT754 -> NT226 NT750 [1.0]
    NT750 -> NT56 UnaryNT3 [1.0]
    S -> NT40 NT183 [0.000522884]
    S -> Digit NT53 [0.000799705]
    S -> NT131 NT13 [0.00126107]
    NT131 -> Digit NT53 [1.0]
    S -> NT54 NT29 [0.000123031]
    S -> NT54 NT13 [0.000399852]
    S -> NT67 NT432 [0.0000615157]
    S -> UnaryNT11 NT739 [0.0000307579]
    NT739 -> NT67 NT597 [1.0]
    NT597 -> NT29 NT14 [1.0]
    S -> NT55 NT432 [0.0000307579]
    S -> UnaryNT11 NT668 [0.0000307579]
    NT668 -> NT55 NT597 [1.0]
    S -> NT25 NT77 [0.000338337]
    S -> NT212 NT116 [0.00043061]
    S -> NT208 NT86 [0.000153789]
    S -> UnaryNT1 NT605 [0.0000615157]
    NT605 -> NT177 NT86 [1.0]
    NT177 -> NT20 NT77 [1.0]
    S -> NT32 NT23 [0.000184547]
    S -> NT56 NT31 [0.0000307579]
    S -> NT351 NT31 [0.0000615157]
    NT351 -> NT32 NT23 [1.0]
    S -> NT50 NT23 [0.0000615157]
    S -> NT50 NT31 [0.0000615157]
    S -> NT25 NT17 [0.0000307579]
    S -> Digit NT385 [0.0000307579]
    NT385 -> UnaryNT5 NT354 [1.0]
    NT354 -> NT140 UnaryNT3 [1.0]
    S -> NT21 NT385 [0.000153789]
    S -> NT36 NT23 [0.000184547]
    S -> NT49 NT23 [0.0000922736]
    S -> NT102 NT31 [0.000153789]
    S -> NT45 NT23 [0.000153789]
    S -> NT46 NT23 [0.000123031]
    S -> NT46 NT31 [0.0000615157]
    S -> NT32 NT66 [0.000799705]
    S -> NT106 NT19 [0.0000922736]
    S -> NT75 UnaryNT9 [0.0000922736]
    NT75 -> NT19 NT15 [1.0]
    NT15 -> NT15 NT15 [0.0110581]
    S -> NT246 UnaryNT9 [0.000184547]
    NT246 -> Digit NT75 [1.0]
    S -> NT40 UnaryNT9 [0.000492126]
    S -> NT87 NT526 [0.0000922736]
    NT526 -> UnaryNT12 UnaryNT9 [1.0]
    S -> NT200 NT15 [0.000123031]
    NT200 -> NT40 UnaryNT9 [1.0]
    S -> Digit NT75 [0.000338337]
    S -> NT200 NT28 [0.000123031]
    S -> NT87 NT28 [0.000153789]
    S -> NT200 NT88 [0.0000922736]
    S -> NT87 NT88 [0.000123031]
    S -> NT470 NT23 [0.0000307579]
    NT470 -> UnaryNT1 NT103 [1.0]
    S -> NT103 NT61 [0.0000922736]
    S -> NT33 NT13 [0.0000922736]
    S -> NT122 NT28 [0.0000922736]
    S -> NT21 NT23 [0.000123031]
    S -> NT376 NT13 [0.0000922736]
    NT376 -> NT21 NT23 [1.0]
    S -> NT56 NT13 [0.000215305]
    S -> NT38 NT290 [0.000276821]
    NT290 -> NT232 UnaryNT3 [1.0]
    NT232 -> UnaryNT1 NT54 [1.0]
    S -> NT13 NT54 [0.000369094]
    S -> NT53 NT29 [0.000338337]
    S -> NT53 NT13 [0.000338337]
    S -> NT53 NT24 [0.000399852]
    S -> NT470 NT14 [0.0000922736]
    S -> NT43 NT23 [0.000123031]
    S -> NT101 NT79 [0.000184547]
    S -> NT43 NT369 [0.0000615157]
    NT369 -> UnaryNT7 NT104 [1.0]
    S -> NT237 NT104 [0.0000307579]
    NT237 -> Digit UnaryNT7 [1.0]
    S -> NT53 NT66 [0.0000615157]
    S -> NT201 NT86 [0.0000307579]
    NT201 -> Digit NT139 [1.0]
    S -> NT44 NT369 [0.0000615157]
    S -> NT13 NT369 [0.0000922736]
    S -> NT257 NT27 [0.000123031]
    NT257 -> NT255 NT37 [1.0]
    NT255 -> NT237 NT115 [1.0]
    S -> NT43 NT44 [0.0000615157]
    S -> NT257 NT139 [0.000338337]
    S -> NT43 NT221 [0.000676673]
    NT221 -> NT13 NT139 [1.0]
    S -> NT536 NT622 [0.0000307579]
    NT536 -> NT44 NT416 [1.0]
    NT416 -> UnaryNT7 UnaryNT1 [1.0]
    NT622 -> UnaryNT11 NT104 [1.0]
    S -> NT600 NT622 [0.0000307579]
    NT600 -> NT13 NT416 [1.0]
    S -> NT558 NT79 [0.0000922736]
    NT558 -> UnaryNT1 NT201 [1.0]
    S -> NT115 Digit [0.000307579]
    S -> NT121 NT350 [0.0000307579]
    NT350 -> UnaryNT11 NT121 [1.0]
    S -> NT43 NT350 [0.000215305]
    S -> NT95 NT193 [0.000215305]
    NT193 -> UnaryNT11 NT57 [1.0]
    S -> Digit NT193 [0.000461368]
    S -> NT113 NT29 [0.0000307579]
    NT113 -> NT25 NT13 [1.0]
    S -> NT113 NT77 [0.0000307579]
    S -> NT678 NT79 [0.0000307579]
    NT678 -> UnaryNT1 NT155 [1.0]
    NT155 -> NT25 NT24 [1.0]
    S -> NT189 NT79 [0.0000615157]
    NT189 -> UnaryNT1 NT113 [1.0]
    S -> NT83 NT28 [0.0000922736]
    S -> NT230 NT13 [0.0000922736]
    NT230 -> NT40 NT183 [1.0]
    S -> NT13 NT60 [0.000123031]
    NT60 -> NT20 NT23 [1.0]
    S -> NT55 NT279 [0.000338337]
    NT279 -> NT13 NT79 [1.0]
    S -> NT232 NT116 [0.0000922736]
    S -> NT274 NT210 [0.0000307579]
    S -> NT16 NT210 [0.0000307579]
    S -> NT274 NT463 [0.0000615157]
    NT463 -> NT210 NT23 [1.0]
    S -> NT16 NT463 [0.0000615157]
    S -> NT590 NT194 [0.0000307579]
    NT590 -> NT274 NT210 [1.0]
    S -> NT660 NT194 [0.0000307579]
    NT660 -> NT16 NT210 [1.0]
    S -> NT172 NT601 [0.0000615157]
    NT601 -> NT190 NT512 [1.0]
    NT190 -> NT20 NT15 [1.0]
    NT512 -> NT79 NT194 [1.0]
    S -> NT457 NT194 [0.0000307579]
    NT457 -> NT16 NT215 [1.0]
    S -> NT655 NT541 [0.0000615157]
    NT655 -> UnaryNT1 NT624 [1.0]
    NT624 -> Digit NT399 [1.0]
    S -> NT648 NT512 [0.0000307579]
    S -> NT172 NT329 [0.000276821]
    NT329 -> NT65 NT31 [1.0]
    S -> NT172 NT405 [0.000153789]
    NT405 -> NT144 NT165 [1.0]
    S -> NT521 NT165 [0.0000307579]
    NT521 -> NT16 NT53 [1.0]
    S -> NT521 NT345 [0.0000615157]
    S -> NT13 NT53 [0.000615157]
    S -> NT20 NT13 [0.000461368]
    S -> NT26 NT13 [0.00043061]
    S -> NT40 NT290 [0.0000615157]
    S -> Digit NT54 [0.000184547]
    S -> Digit NT218 [0.000307579]
    NT218 -> NT54 NT13 [1.0]
    S -> NT328 NT35 [0.000276821]
    NT328 -> NT46 UnaryNT7 [1.0]
    S -> NT445 NT116 [0.0000922736]
    S -> NT95 NT15 [0.000123031]
    S -> NT102 NT39 [0.000399852]
    S -> UnaryNT4 NT23 [0.0000615157]
    S -> UnaryNT4 NT194 [0.0000615157]
    S -> NT76 NT39 [0.0000307579]
    S -> NT89 NT13 [0.000276821]
    S -> NT265 NT86 [0.000276821]
    NT265 -> UnaryNT1 NT109 [1.0]
    NT109 -> NT25 NT15 [1.0]
    S -> NT286 NT233 [0.0000922736]
    NT286 -> NT25 NT23 [1.0]
    S -> NT286 NT15 [0.000123031]
    S -> NT265 NT79 [0.000153789]
    S -> NT49 NT170 [0.000215305]
    S -> NT140 NT165 [0.000215305]
    S -> NT440 NT165 [0.000123031]
    NT440 -> NT67 NT39 [1.0]
    S -> NT21 NT31 [0.000276821]
    S -> NT228 NT31 [0.000645915]
    NT228 -> UnaryNT1 NT117 [1.0]
    S -> NT95 NT39 [0.0000307579]
    S -> NT13 UnaryNT10 [0.000246063]
    S -> NT333 NT13 [0.0000307579]
    NT333 -> NT13 UnaryNT10 [1.0]
    S -> NT13 NT308 [0.0000307579]
    S -> NT387 NT165 [0.000153789]
    NT387 -> UnaryNT1 NT382 [1.0]
    NT382 -> NT40 NT232 [1.0]
    S -> NT387 NT345 [0.0000307579]
    S -> NT74 NT14 [0.0000922736]
    NT74 -> NT13 NT31 [1.0]
    S -> NT256 NT86 [0.00043061]
    NT256 -> UnaryNT1 NT112 [1.0]
    S -> NT200 NT13 [0.0000307579]
    S -> UnaryNT1 NT586 [0.0000615157]
    NT586 -> NT95 NT14 [1.0]
    S -> NT600 NT576 [0.0000307579]
    NT576 -> NT102 UnaryNT3 [1.0]
    S -> NT536 NT576 [0.0000615157]
    S -> NT22 NT23 [0.000246063]
    S -> NT490 NT30 [0.0000307579]
    NT490 -> UnaryNT1 NT468 [1.0]
    NT468 -> NT46 UnaryNT3 [1.0]
    S -> NT756 UnaryNT3 [0.0000307579]
    NT756 -> Digit NT692 [1.0]
    NT692 -> NT226 NT665 [1.0]
    NT665 -> UnaryNT9 NT47 [1.0]
    S -> UnaryNT1 NT331 [0.000246063]
    NT331 -> NT114 NT79 [1.0]
    S -> NT338 NT23 [0.0000307579]
    NT338 -> UnaryNT1 NT331 [1.0]
    S -> NT56 NT14 [0.000184547]
    S -> NT22 NT251 [0.0000922736]
    NT251 -> NT248 UnaryNT3 [1.0]
    NT248 -> NT226 NT22 [1.0]
    S -> Digit NT251 [0.000307579]
    S -> NT269 NT47 [0.000123031]
    NT269 -> Digit NT251 [1.0]
    S -> NT81 NT19 [0.0000922736]
    S -> NT80 NT29 [0.0000307579]
    S -> NT34 NT47 [0.000153789]
    S -> NT34 NT52 [0.0000307579]
    S -> NT104 NT27 [0.000184547]
    S -> NT94 NT29 [0.0000307579]
    S -> NT153 NT13 [0.0000307579]
    NT153 -> NT26 NT29 [1.0]
    S -> NT71 NT14 [0.0000307579]
    S -> NT262 NT14 [0.0000307579]
    NT262 -> NT256 NT86 [1.0]
    S -> NT75 NT28 [0.000123031]
    S -> NT61 NT13 [0.0000922736]
    S -> NT360 NT13 [0.0000922736]
    NT360 -> NT75 NT28 [1.0]
    S -> NT321 NT17 [0.0000922736]
    NT321 -> NT36 NT23 [1.0]
    S -> NT244 NT17 [0.000123031]
    NT244 -> NT152 NT23 [1.0]
    S -> NT16 NT436 [0.000123031]
    NT436 -> NT26 NT116 [1.0]
    S -> UnaryNT10 NT14 [0.0000307579]
    S -> NT524 NT23 [0.0000615157]
    NT524 -> UnaryNT10 NT14 [1.0]
    S -> UnaryNT4 NT170 [0.000123031]
    S -> NT364 NT13 [0.0000922736]
    NT364 -> UnaryNT4 NT170 [1.0]
    S -> NT34 NT13 [0.000123031]
    S -> UnaryNT4 NT36 [0.000153789]
    S -> NT403 NT170 [0.0000307579]
    NT403 -> UnaryNT4 NT36 [1.0]
    S -> NT36 NT170 [0.0000615157]
    S -> NT172 NT549 [0.0000922736]
    NT549 -> NT57 NT215 [1.0]
    S -> NT208 NT79 [0.000276821]
    S -> NT102 NT15 [0.0000615157]
    S -> NT94 NT13 [0.000338337]
    S -> NT134 NT29 [0.000215305]
    NT134 -> NT21 NT13 [1.0]
    S -> NT188 NT13 [0.000215305]
    NT188 -> NT13 UnaryNT8 [1.0]
    S -> NT188 NT24 [0.0000615157]
    S -> NT13 UnaryNT8 [0.000399852]
    S -> NT188 NT17 [0.000307579]
    S -> NT81 NT28 [0.000123031]
    S -> NT81 NT15 [0.000123031]
    S -> NT144 NT116 [0.000215305]
    S -> NT44 NT31 [0.000123031]
    S -> NT25 NT23 [0.0000615157]
    S -> NT19 NT31 [0.0000307579]
    S -> UnaryNT11 NT84 [0.0000922736]
    NT84 -> NT32 NT31 [1.0]
    S -> NT140 NT86 [0.0000922736]
    S -> NT317 NT169 [0.0000615157]
    NT317 -> UnaryNT1 NT125 [1.0]
    NT169 -> NT13 NT86 [0.833333]
    S -> NT318 NT13 [0.0000307579]
    NT318 -> NT140 NT86 [1.0]
    S -> NT533 NT13 [0.0000307579]
    NT533 -> NT317 NT169 [1.0]
    S -> NT318 NT169 [0.0000307579]
    NT169 -> NT169 NT169 [0.166667]
    S -> NT318 NT196 [0.0000922736]
    NT196 -> NT169 NT14 [1.0]
    S -> NT317 NT196 [0.0000922736]
    S -> NT140 NT272 [0.0000307579]
    NT272 -> NT86 NT224 [1.0]
    NT224 -> NT196 NT15 [1.0]
    S -> NT317 NT224 [0.000123031]
    S -> NT311 NT224 [0.000123031]
    S -> NT189 NT272 [0.000399852]
    S -> NT580 NT15 [0.0000615157]
    NT580 -> NT318 NT14 [1.0]
    S -> NT361 NT15 [0.0000307579]
    NT361 -> NT56 NT14 [1.0]
    S -> NT34 NT15 [0.0000615157]
    S -> NT697 NT15 [0.0000307579]
    NT697 -> NT584 NT79 [1.0]
    NT584 -> NT567 NT71 [1.0]
    NT567 -> UnaryNT1 NT286 [1.0]
    NT71 -> NT71 NT71 [0.1]
    S -> NT584 NT633 [0.0000307579]
    NT633 -> NT279 NT15 [1.0]
    S -> NT567 NT633 [0.0000307579]
    S -> NT236 NT15 [0.000399852]
    NT236 -> NT189 NT79 [1.0]
    S -> NT49 NT363 [0.000153789]
    NT363 -> NT36 NT15 [1.0]
    S -> NT236 NT39 [0.0000615157]
    S -> NT236 NT23 [0.0000615157]
    S -> NT102 NT23 [0.0000307579]
    S -> UnaryNT1 NT241 [0.000553642]
    NT241 -> NT105 NT79 [1.0]
    NT105 -> NT81 NT13 [1.0]
    S -> NT13 NT551 [0.0000307579]
    NT551 -> NT102 NT15 [1.0]
    S -> NT45 NT548 [0.0000307579]
    NT548 -> NT58 NT336 [1.0]
    NT336 -> NT30 UnaryNT3 [1.0]
    S -> NT13 NT548 [0.0000615157]
    S -> NT237 NT563 [0.0000922736]
    NT563 -> NT490 NT557 [1.0]
    NT557 -> UnaryNT2 Digit [1.0]
    S -> NT404 NT454 [0.0000615157]
    NT404 -> NT16 NT336 [1.0]
    NT454 -> UnaryNT5 NT404 [1.0]
    S -> NT13 NT454 [0.0000615157]
    S -> NT96 NT24 [0.000123031]
    S -> NT21 NT61 [0.0000307579]
    S -> NT67 NT673 [0.0000307579]
    NT673 -> NT61 NT17 [1.0]
    S -> NT324 NT17 [0.0000615157]
    S -> UnaryNT1 NT585 [0.0000615157]
    NT585 -> NT138 NT86 [1.0]
    NT138 -> NT26 NT39 [1.0]
    S -> NT136 NT13 [0.0000307579]
    NT136 -> NT21 NT29 [1.0]
    S -> NT65 NT17 [0.0000307579]
    S -> NT42 NT31 [0.0000307579]
    NT42 -> NT13 NT17 [1.0]
    S -> NT547 NT31 [0.0000615157]
    NT547 -> NT65 NT17 [1.0]
    S -> NT307 NT86 [0.000307579]
    NT307 -> UnaryNT1 NT106 [1.0]
    S -> UnaryNT1 NT669 [0.0000307579]
    NT669 -> NT64 NT79 [1.0]
    S -> UnaryNT1 NT510 [0.0000922736]
    NT510 -> NT153 NT23 [1.0]
    S -> UnaryNT1 NT667 [0.0000307579]
    NT667 -> NT41 NT116 [1.0]
    S -> NT583 NT17 [0.0000615157]
    NT583 -> NT16 NT90 [1.0]
    S -> NT380 NT86 [0.000184547]
    NT380 -> UnaryNT1 NT163 [1.0]
    NT163 -> NT20 NT39 [1.0]
    S -> NT22 NT194 [0.000153789]
    S -> NT51 NT194 [0.0000922736]
    S -> NT539 NT215 [0.0000922736]
    NT539 -> NT284 UnaryNT4 [1.0]
    S -> NT34 NT170 [0.000246063]
    S -> NT363 NT593 [0.0000615157]
    NT593 -> NT20 NT170 [1.0]
    S -> NT295 NT170 [0.000184547]
    S -> UnaryNT1 NT621 [0.0000615157]
    NT621 -> NT159 NT116 [1.0]
    NT159 -> NT95 NT13 [1.0]
    S -> UnaryNT11 NT354 [0.0000615157]
    S -> NT612 UnaryNT3 [0.0000615157]
    NT612 -> NT161 NT159 [1.0]
    S -> UnaryNT11 NT57 [0.000246063]
    S -> NT161 NT588 [0.0000615157]
    NT588 -> NT88 UnaryNT3 [1.0]
    S -> NT36 NT31 [0.000215305]
    S -> NT21 NT28 [0.0000922736]
    S -> NT19 NT61 [0.0000615157]
""")
    N -> A14 B14 C14 [0.15] | B14 A14 C14 [0.17] | C14 A14 B14 [0.17] | C14 B14 A14 [0.17] | A14 C14 B14 [0.17] | B14 C14 A14 [0.17]
    O -> A15 B15 C15 [0.15] | B15 A15 C15 [0.17] | C15 A15 B15 [0.17] | C15 B15 A15 [0.17] | A15 C15 B15 [0.17] | B15 C15 A15 [0.17]
    P -> A16 B16 C16 [0.15] | B16 A16 C16 [0.17] | C16 A16 B16 [0.17] | C16 B16 A16 [0.17] | A16 C16 B16 [0.17] | B16 C16 A16 [0.17]
    Q -> A17 B17 C17 [0.15] | B17 A17 C17 [0.17] | C17 A17 B17 [0.17] | C17 B17 A17 [0.17] | A17 C17 B17 [0.17] | B17 C17 A17 [0.17]
    R -> A18 B18 C18 [0.15] | B18 A18 C18 [0.17] | C18 A18 B18 [0.17] | C18 B18 A18 [0.17] | A18 C18 B18 [0.17] | B18 C18 A18 [0.17]
    T -> A19 B19 C19 [0.15] | B19 A19 C19 [0.17] | C19 A19 B19 [0.17] | C19 B19 A19 [0.17] | A19 C19 B19 [0.17] | B19 C19 A19 [0.17]
    U -> A20 B20 C20 [0.15] | B20 A20 C20 [0.17] | C20 A20 B20 [0.17] | C20 B20 A20 [0.17] | A20 C20 B20 [0.17] | B20 C20 A20 [0.17]
    V -> A21 B21 C21 [0.15] | B21 A21 C21 [0.17] | C21 A21 B21 [0.17] | C21 B21 A21 [0.17] | A21 C21 B21 [0.17] | B21 C21 A21 [0.17]
    W -> A22 B22 C22 [0.15] | B22 A22 C22 [0.17] | C22 A22 B22 [0.17] | C22 B22 A22 [0.17] | A22 C22 B22 [0.17] | B22 C22 A22 [0.17]
    X -> A23 B23 C23 [0.15] | B23 A23 C23 [0.17] | C23 A23 B23 [0.17] | C23 B23 A23 [0.17] | A23 C23 B23 [0.17] | B23 C23 A23 [0.17]
    Y -> A24 B24 C24 [0.15] | B24 A24 C24 [0.17] | C24 A24 B24 [0.17] | C24 B24 A24 [0.17] | A24 C24 B24 [0.17] | B24 C24 A24 [0.17]
    Z -> A25 B25 C25 [0.15] | B25 A25 C25 [0.17] | C25 A25 B25 [0.17] | C25 B25 A25 [0.17] | A25 C25 B25 [0.17] | B25 C25 A25 [0.17]
    AS -> A26 B26 C26 [0.15] | B26 A26 C26 [0.17] | C26 A26 B26 [0.17] | C26 B26 A26 [0.17] | A26 C26 B26 [0.17] | B26 C26 A26 [0.17]
    BS -> A27 B27 C27 [0.15] | B27 A27 C27 [0.17] | C27 A27 B27 [0.17] | C27 B27 A27 [0.17] | A27 C27 B27 [0.17] | B27 C27 A27 [0.17]
    CS -> A28 B28 C28 [0.15] | B28 A28 C28 [0.17] | C28 A28 B28 [0.17] | C28 B28 A28 [0.17] | A28 C28 B28 [0.17] | B28 C28 A28 [0.17]
    DS -> A29 B29 C29 [0.15] | B29 A29 C29 [0.17] | C29 A29 B29 [0.17] | C29 B29 A29 [0.17] | A29 C29 B29 [0.17] | B29 C29 A29 [0.17]
    ES -> A30 B30 C30 [0.15] | B30 A30 C30 [0.17] | C30 A30 B30 [0.17] | C30 B30 A30 [0.17] | A30 C30 B30 [0.17] | B30 C30 A30 [0.17]
    FS -> A31 B31 C31 [0.15] | B31 A31 C31 [0.17] | C31 A31 B31 [0.17] | C31 B31 A31 [0.17] | A31 C31 B31 [0.17] | B31 C31 A31 [0.17]
    GS -> A32 B32 C32 [0.15] | B32 A32 C32 [0.17] | C32 A32 B32 [0.17] | C32 B32 A32 [0.17] | A32 C32 B32 [0.17] | B32 C32 A32 [0.17]
    HS -> A33 B33 C33 [0.15] | B33 A33 C33 [0.17] | C33 A33 B33 [0.17] | C33 B33 A33 [0.17] | A33 C33 B33 [0.17] | B33 C33 A33 [0.17]
    IS -> A34 B34 C34 [0.15] | B34 A34 C34 [0.17] | C34 A34 B34 [0.17] | C34 B34 A34 [0.17] | A34 C34 B34 [0.17] | B34 C34 A34 [0.17]
    JS -> A35 B35 C35 [0.15] | B35 A35 C35 [0.17] | C35 A35 B35 [0.17] | C35 B35 A35 [0.17] | A35 C35 B35 [0.17] | B35 C35 A35 [0.17]
    KS -> A36 B36 C36 [0.15] | B36 A36 C36 [0.17] | C36 A36 B36 [0.17] | C36 B36 A36 [0.17] | A36 C36 B36 [0.17] | B36 C36 A36 [0.17]
    LS -> A37 B37 C37 [0.15] | B37 A37 C37 [0.17] | C37 A37 B37 [0.17] | C37 B37 A37 [0.17] | A37 C37 B37 [0.17] | B37 C37 A37 [0.17]
    MS -> A38 B38 C38 [0.15] | B38 A38 C38 [0.17] | C38 A38 B38 [0.17] | C38 B38 A38 [0.17] | A38 C38 B38 [0.17] | B38 C38 A38 [0.17]
    NS -> A39 B39 C39 [0.15] | B39 A39 C39 [0.17] | C39 A39 B39 [0.17] | C39 B39 A39 [0.17] | A39 C39 B39 [0.17] | B39 C39 A39 [0.17]
    OS -> A40 B40 C40 [0.15] | B40 A40 C40 [0.17] | C40 A40 B40 [0.17] | C40 B40 A40 [0.17] | A40 C40 B40 [0.17] | B40 C40 A40 [0.17]
"""

alignment_grammar = PCFG.fromstring(alignment_grammar_str)
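The permutation blocks above (N through OS) are highly repetitive: each left-hand side expands to all six orderings of its A/B/C triple with the same weights. A block like this could be built programmatically; the sketch below (permutation_rules is a hypothetical helper, not part of the original code) reproduces one such line:

def permutation_rules(lhs, idx):
    # Emit all six orderings of Ai Bi Ci with the [0.15] + five [0.17]
    # weights used in the hand-written rules above.
    a, b, c = 'A%d' % idx, 'B%d' % idx, 'C%d' % idx
    orderings = [(a, b, c), (b, a, c), (c, a, b), (c, b, a), (a, c, b), (b, c, a)]
    probs = ['0.15'] + ['0.17'] * 5
    alternatives = ' | '.join('%s %s %s [%s]' % (x, y, z, p)
                              for (x, y, z), p in zip(orderings, probs))
    return '%s -> %s' % (lhs, alternatives)

For example, permutation_rules('N', 14) reproduces the first line of the block.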
Example #33
0
from nltk import PCFG
from nltk.parse.generate import generate
from nltk.text import Text
import random

pgrammar = PCFG.fromstring("""
S -> Pendant [1]
Pendant -> Figure [1]
Figure -> Primitive [.25] | Concatenated [.07] | Intersected [.05] | Overlapped [.14] | Mirror [.21] | Extrude [.18] | Hull [.1]
Mirror -> 'mirror' '[' Figure ']' [1]
Extrude -> 'extrude' '[' 2DPrimitive ']' [1]
Hull -> 'hull' '[' Figure ']' [1]
Concatenated -> 'concat' '[' FiguresArray ']' [1]
Overlapped -> 'overlap' '[' FiguresArray ']' [1]
Intersected -> 'intersect' '[' FiguresArray ']' [1]
FiguresArray -> Figure [0.6] | Figure FiguresArray [0.4]
Primitive -> 3DPrimitive [0.5] | 2DPrimitive [0.5]
3DPrimitive -> 'triangle' [0.5] | 'sphere' [0.5]
2DPrimitive -> 'square' [0.5] | 'circle' [0.5]
""")

print(pgrammar)

def generate_sample(grammar, prod, frags):        
    if prod in grammar._lhs_index: # Derivation
        derivations = grammar._lhs_index[prod]            
        derivation = weighted_choice(derivations)            
        for d in derivation._rhs:            
            generate_sample(grammar, d, frags)
    elif prod in grammar._rhs_index:
        # terminal symbol: append it to the output fragments
        frags.append(prod)
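The generator above depends on a weighted_choice helper that the snippet does not show. A minimal sketch, assuming it draws one production in proportion to its prob(), could look like this:

import random

def weighted_choice(productions):
    # Hypothetical reconstruction: pick one ProbabilisticProduction,
    # weighted by its probability.
    threshold = random.random()
    cumulative = 0.0
    for production in productions:
        cumulative += production.prob()
        if threshold <= cumulative:
            return production
    return productions[-1]  # guard against floating-point round-off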
Example #34
0
from chords_generation.learn_trees_music_generate import remove_symbols, generate_sample
from melodies_generation.bass_generation import write_bass_ly, convert_ly_notation_bass, put_p
from melodies_generation.piano_generation import convert_ly_notation_piano, annotate_rythm_variations, write_piano_ly, generate_midi
from melodies_generation.percusion_generation import ly_notation_percusion, get_rythm_tim,get_rythm_congas,get_rythm_cam,get_rythm_gui,get_rythm_mar, write_percusion_ly
from melodies_generation.wav import combine
from nltk import PCFG, Nonterminal
import os, subprocess
if __name__ =='__main__':
    grammar_file = open("grammar/pcfg.txt", "r")
    grammar_str = grammar_file.read()
    grammar_file.close()
    # print ("GRAMÁTICA CON + \n"+grammar_str)
    grammar_str= remove_symbols(grammar_str)
    # print ("GRAMÁTICA SIN + \n"+grammar_str)
    grammar = PCFG.fromstring(grammar_str)
    #grammar = learn_trees(three_trees)
    #f = open("pcfg.txt", "w")
    #f.write(str(grammar))
    #f.close()
    s = [Nonterminal('S')]
    chords = generate_sample(grammar, s, [])
    print (chords)
    tonality="A"
    title = 'Automatic Generated Song'
    composer ='Grupo Niche'
    copyright = 'Brayan Rodríguez'
    time= '2/2'
    tempo = '100'
    chords_without_points = [x for x in chords if x != '.'] #remove points from chords
    #piano
Example #35
0
def generate():
    print ('generate')
    tonality = str(request.args.get('tonality'))
    tempo = str(request.args.get('tempo'))
    dest_path = '../front/src/static/'
    print (tonality)
    print (tempo)
    grammar_file = open('flask_app/grammar/pcfg.txt', 'r')
    grammar_str = grammar_file.read()
    grammar_file.close()
    # print ("GRAMÁTICA CON + \n"+grammar_str)
    grammar_str= remove_symbols(grammar_str)
    # print ("GRAMÁTICA SIN + \n"+grammar_str)
    grammar = PCFG.fromstring(grammar_str)
    #grammar = learn_trees(three_trees)
    #f = open("pcfg.txt", "w")
    #f.write(str(grammar))
    #f.close()
    s = [Nonterminal('S')]
    chords = generate_sample(grammar, s, [])
    print (chords)
    title = 'Automatic Generated Song'
    composer ='Grupo Niche'
    copyright = 'Brayan Rodríguez'
    time= '2/2'
    chords_without_points = [x for x in chords if x != '.'] #remove points from chords
    #piano
    chords_variations = annotate_rythm_variations(chords_without_points, 4)
    (upper_staff, lower_staff) = convert_ly_notation_piano(chords_variations, tonality)
    file_name_piano = 'piano_salsa'
    write_piano_ly(upper_staff, lower_staff, title, composer, copyright, tonality, time, tempo, file_name_piano)
    generate_midi(file_name_piano)
    #to convert midi into wav
    with open(os.devnull, 'wb') as devnull:
        subprocess.check_call(['timidity', file_name_piano+'.midi','-Ow'], stdout=devnull, stderr=subprocess.STDOUT)
    #bass
    file_name_bass = 'bass_salsa'
    chords_variations = put_p(chords_without_points)
    bass_staff = convert_ly_notation_bass(chords_variations, tonality)
    write_bass_ly(bass_staff, title, composer, copyright, tonality, time, tempo, file_name_bass)
    generate_midi(file_name_bass)
    #to convert midi into wav
    with open(os.devnull, 'wb') as devnull:
        subprocess.check_call(['timidity', file_name_bass+'.midi', '-Ow'], stdout=devnull, stderr=subprocess.STDOUT)
    combine(file_name_piano+'.wav', file_name_bass+'.wav', 'piano_bass.wav')
    # percussion
    measures = len(chords_without_points) // 2  # floor division keeps the measure count an int (as under Python 2)
    congas = get_rythm_congas()
    staff_c = ly_notation_percusion(congas, measures)
    cam = get_rythm_cam()
    staff_cam = ly_notation_percusion(cam, measures)
    tim = get_rythm_tim()
    staff_t = ly_notation_percusion(tim, measures)
    gui = get_rythm_gui()
    staff_g = ly_notation_percusion(gui, measures)
    mar = get_rythm_mar()
    staff_m = ly_notation_percusion(mar, measures)
    file_name_percusion = 'percusion_salsa'
    write_percusion_ly(staff_c,staff_cam,staff_t,staff_g,staff_m, title, composer, copyright, tonality, time, tempo, file_name_percusion)
    generate_midi(file_name_percusion)
    #to convert midi into wav
    with open(os.devnull, 'wb') as devnull:
        subprocess.check_call(['timidity', file_name_percusion+'.midi', '-Ow'], stdout=devnull, stderr=subprocess.STDOUT)
    file_name_combined = 'combined.wav'
    combine('piano_bass.wav', file_name_percusion+'.wav', file_name_combined)
    #to move to front end
    with open(os.devnull, 'wb') as devnull:
        subprocess.check_call(['mv', file_name_combined, dest_path], stdout=devnull, stderr=subprocess.STDOUT)
        subprocess.check_call(['mv', file_name_bass + '.pdf', dest_path], stdout=devnull, stderr=subprocess.STDOUT)
        subprocess.check_call(['mv', file_name_piano + '.pdf', dest_path], stdout=devnull, stderr=subprocess.STDOUT)
    # combined_file = open('combined.wav', 'rb')
    # combined_content = combined_file.read()
    # resp = make_response(combined_content)
    # combined_file.close()
    # resp = make_response(send_file('../combined.wav'))
    grammar_file = open('flask_app/chords_generation/chosen_prod.txt', 'r')
    chosen_prod = grammar_file.read()
    grammar_file.close()
    resp_json = json.dumps({'error': False, 'components': chosen_prod})
    resp = Response(resp_json)
    resp.headers['Access-Control-Allow-Origin'] = '*'
    # resp.headers['Content-Type'] = 'audio/wav'
    # resp.headers['Content-Disposition'] = 'attachment;filename=combined.wav'
    return resp
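The timidity call that renders MIDI to WAV appears three times above (piano, bass, percussion); it could be factored into a small helper that uses the same invocation (a sketch):

import os
import subprocess

def midi_to_wav(file_name):
    # Render <file_name>.midi to <file_name>.wav with timidity -Ow,
    # discarding the tool's console output, exactly as in the code above.
    with open(os.devnull, 'wb') as devnull:
        subprocess.check_call(['timidity', file_name + '.midi', '-Ow'],
                              stdout=devnull, stderr=subprocess.STDOUT)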
Example #36
0
grammar = PCFG.fromstring('''
    S -> side eqside [0.766312]
    side -> side plusterm [0.215816]
    side -> side minusterm [0.204728]
    side -> number variable [0.192571]
    number -> digit number [0.2953]
    digit -> '2' [0.20094]
    number -> '8' [0.0512296]
    variable -> 'x' [1.0]
    minusterm -> minus term [1.0]
    minus -> '-' [1.0]
    term -> digit number [0.185462]
    digit -> '6' [0.062679]
    number -> '0' [0.0918352]
    plusterm -> plus term [1.0]
    plus -> '+' [1.0]
    term -> number divnum [0.205817]
    number -> '2' [0.114112]
    divnum -> div number [1.0]
    div -> '/' [1.0]
    number -> '5' [0.0972758]
    eqside -> eq negside [0.402479]
    eq -> '=' [1.0]
    negside -> minus side [1.0]
    digit -> '1' [0.356414]
    S -> negside eqside [0.233688]
    side -> number divnum [0.0921555]
    digit -> '7' [0.0299559]
    number -> '3' [0.0644647]
    eqside -> eq side [0.597521]
    side -> 'x' [0.0759721]
    side -> '6' [0.00732374]
    digit -> '9' [0.0175681]
    number -> '6' [0.0557699]
    number -> '4' [0.0737897]
    number -> '7' [0.0733781]
    digit -> '3' [0.150809]
    term -> number variable [0.314671]
    side -> number divnumvar [0.0415636]
    divnumvar -> divnum variable [1.0]
    side -> digit number [0.074099]
    number -> dot number [0.0103797]
    dot -> '.' [1.0]
    number -> '1' [0.0461748]
    term -> 'x' [0.032113]
    digit -> '8' [0.0327874]
    digit -> '4' [0.0841404]
    digit -> '5' [0.0397696]
    number -> '9' [0.0262901]
    side -> '1' [0.0088784]
    side -> number parenside [0.0178505]
    parenside -> lparen siderparen [1.0]
    lparen -> '(' [1.0]
    siderparen -> side rparen [0.841012]
    term -> '5' [0.0212008]
    rparen -> ')' [1.0]
    term -> number parenside [0.0322466]
    term -> '3' [0.00984322]
    side -> parenside divnum [0.00844759]
    term -> parenside divnum [0.00383039]
    digit -> '0' [0.0249365]
    side -> '3' [0.00307185]
    term -> '4' [0.0399964]
    term -> '9' [0.0138963]
    term -> '1' [0.0294406]
    side -> '5' [0.00533828]
    term -> '8' [0.0189293]
    side -> '4' [0.00842886]
    side -> '2' [0.0126808]
    term -> '2' [0.035008]
    term -> number divnumvar [0.0251203]
    side -> '0' [0.0074174]
    side -> number vardivnum [0.00829774]
    vardivnum -> variable divnum [1.0]
    term -> number vardivnum [0.00944237]
    side -> dot number [0.0017045]
    side -> number starnum [0.00129243]
    starnum -> star number [1.0]
    star -> '*' [1.0]
    side -> '9' [0.00340901]
    term -> '7' [0.0130946]
    siderparen -> negside rparen [0.158988]
    term -> dot number [0.00218243]
    term -> number starnum [0.0014698]
    term -> '0' [0.00080171]
    side -> '8' [0.00681801]
    side -> '7' [0.00213531]
    term -> '6' [0.00543381]
''')
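Because every production carries a probability, this grammar can drive a probabilistic parser directly. A minimal usage sketch that parses the equation 2x = -6, tokenized one character per terminal:

from nltk.parse import ViterbiParser

parser = ViterbiParser(grammar)
for tree in parser.parse(['2', 'x', '=', '-', '6']):
    print(tree)  # most probable parse, annotated with its probability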
Example #37
0
import argparse

from nltk import PCFG
from nltk.parse.generate import generate

# Reconstructed argument parser: the snippet begins mid-call, so the
# parser setup and flag names are inferred from the args.* attributes
# used below.
parser = argparse.ArgumentParser()
parser.add_argument('--rule_file', type=str,
                    help='File that contains the PCFG rules.')
parser.add_argument('--num_examples', type=int,
                    help='Number of sentences to generate.')
parser.add_argument(
    '--output_file',
    type=str,
    default='ex_output.txt',
    help='File that contains the generated sentences from this program.')
args = parser.parse_args()


# can specify depth if needed
def generate_sample(grammar):
    # Reconstructed body (the original stopped at a pdb.set_trace stub):
    # sample one sentence top-down, choosing each production according
    # to its probability.
    import random

    def expand(symbol):
        productions = grammar.productions(lhs=symbol)
        if not productions:  # a terminal (or unexpandable) symbol
            return [symbol]
        weights = [p.prob() for p in productions]
        production = random.choices(productions, weights=weights)[0]
        return [token for sym in production.rhs() for token in expand(sym)]

    return expand(grammar.start())


grammar_text = ""

with open(args.rule_file) as file:
    for line in file:
        grammar_text += line

grammar = PCFG.fromstring(grammar_text)

generate_sample(grammar)

sentences = generate(grammar, n=args.num_examples)

with open(args.output_file, 'w+') as file:
    for s in sentences:
        file.write(' '.join(s) + '\n')
Example #38
0
File: p1.py Project: iamchanthu/code
from nltk.corpus import treebank
from nltk import PCFG, CFG
cfg_grammar = CFG.fromstring("""
 S -> NP VP
 NP -> ART N | N N | N | NP PP
 VP -> V | V NP | V NP PP
 PP -> P NP
 ART -> 'a'
 N -> 'flower' | 'a' | 'blooms'
 V -> 'blooms' | 'flower'
 """)
pcfg_grammar = PCFG.fromstring("""
 S -> NP VP [1.0]
 NP -> ART N [0.53] | N N [0.09] | N [0.14] | NP PP [0.24]
 VP -> V [0.386] | V NP [0.393] | V NP PP [0.22]
 PP -> P NP [1.0]
 ART -> 'a' [1.0]
 N -> 'flower' [0.8] | 'a' [0.1] | 'blooms' [0.1]
 V -> 'blooms' [0.8] | 'flower' [0.2]
 """)

from nltk.parse import RecursiveDescentParser

print(cfg_grammar)
rd = RecursiveDescentParser(pcfg_grammar)
text = "a flower blooms".split()
for t in rd.parse(text):
    print(t)

#rd.draw()
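Note that RecursiveDescentParser ignores the rule probabilities, and left-recursive rules such as NP -> NP PP can send it into unbounded recursion while backtracking. A chart-based probabilistic parser avoids both problems; a sketch:

from nltk.parse import ViterbiParser

vp = ViterbiParser(pcfg_grammar)
for t in vp.parse("a flower blooms".split()):
    print(t)  # best-scoring parse with its probability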
Example #39
0
File: cfg.py Project: wojohowitz00/StackNN
EPOCHS = 30

#############################################################
# grammars and coding

# each name_grammar is followed by:
# name_code_for = the codes for the terminals, and the fill code (last)
# name_predict = a list of the terminals to predict in a string
# name_sample_depth = max depth of derivations to produce sample

# Dyck language on two kinds of parentheses
# terminals to predict: ), ]
parentheses_grammar = PCFG.fromstring("""
S -> S S [0.20]
S -> '(' S ')' [0.20] | '(' ')' [0.20]
S -> '[' S ']' [0.20] | '[' ']' [0.20]
""")
parentheses_code_for = {u'(': 0, u')': 1, u'[': 2, u']': 3, '#': 4}
parentheses_predict = [u')', u']']
parentheses_sample_depth = 5
# depth = 5 yields 15,130 strings
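The 15,130 figure could be sanity-checked with nltk's exhaustive generator, with the caveat that its depth parameter may not count levels the same way as parentheses_sample_depth (a sketch):

from nltk.parse.generate import generate

# Enumerate all derivations up to the depth bound, counting distinct strings.
strings = {tuple(s) for s in generate(parentheses_grammar, depth=5)}
print(len(strings))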

# center-marked "palindromes" (using primed symbols to predict)
# terminals to predict: a1, b1
reverse_grammar = PCFG.fromstring("""
S -> "a" S "a1" [0.48]
S -> "b" S "b1" [0.48]
S -> "c" [0.04]
""")
reverse_code_for = {u"a": 0, u"b": 1, u"a1": 2, u"b1": 3, u"c": 4, u"#": 5}
Example #40
0
import nltk
from nltk import PCFG
from nltk.grammar import FeatureGrammar
from nltk.parse import BottomUpLeftCornerChartParser

with open('lexicon.txt', 'r') as lexicon_file, \
     open('corpus_fixed.txt', 'r') as corpus_file, \
     open('CFG_altered.txt', 'r') as config_file:

    lexicon_text = lexicon_file.read()
    corpus = corpus_file.read()
    cfg_text = config_file.read()

    cfg_full = 'start -> ROOT [1.0] \n' + cfg_text  #+ '\n' + lexicon_text
    # print(cfg_full)

    grammar = PCFG.fromstring(cfg_full)
    # grammar._start = Nonterminal('S')  # note: must be a Nonterminal, not a plain string