Example #1
def demo():
    """
    A simple demonstration function for the C{Tagger} classes.  It
    constructs a backoff tagger using a trigram tagger, bigram tagger
    unigram tagger and a default tagger.  It trains and tests the
    tagger using the Brown corpus.
    """
    from en.parser.nltk_lite.corpora import brown
    from en.parser.nltk_lite import tag
    import sys

    print('Training taggers.')

    # Create a default tagger
    t0 = tag.Default('nn')

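    # Unigram tagger: learns each word's most frequent tag from Brown section
    # 'a', backing off to the default tagger for unseen words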
    t1 = tag.Unigram(cutoff=1, backoff=t0)
    t1.train(brown.tagged('a'), verbose=True)

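    # Affix tagger: tags sufficiently long words by their three-character
    # suffix, again backing off to the default tagger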
    t2 = tag.Affix(-3, 5, cutoff=2, backoff=t0)
    t2.train(brown.tagged('a'), verbose=True)

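    # Regexp tagger: words ending in 'ed' are tagged as past-tense verbs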
    t3 = tag.Regexp([(r'.*ed', 'vbd')], backoff=t0)  # no training

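    # Lookup tagger: tags words found in a small hand-built table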
    t4 = tag.Lookup({'the': 'dt'}, backoff=t0)

    test_tokens = []
    num_words = 0

    print('='*75)
    print('Running the taggers on test data...')
    print('  Default (nn) tagger: ', end=' ')
    sys.stdout.flush()
    _demo_tagger(t0, brown.tagged('b'))

    print('  Unigram tagger:      ', end=' ')
    sys.stdout.flush()
    _demo_tagger(t1, list(brown.tagged('b'))[:1000])

    print('  Affix tagger:        ', end=' ')
    sys.stdout.flush()
    _demo_tagger(t2, list(brown.tagged('b'))[:1000])

    print('  Regexp tagger:       ', end=' ')
    sys.stdout.flush()
    _demo_tagger(t3, list(brown.tagged('b'))[:1000])

    print('  Lookup tagger:       ', end=' ')
    sys.stdout.flush()
    _demo_tagger(t4, list(brown.tagged('b'))[:1000])
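
# The following demo() comes from the Brill tagger module; it shadows the
# definition above if both are loaded into the same namespace.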
def demo(num_sents=100,
         max_rules=200,
         min_score=2,
         error_output="errors.out",
         rule_output="rules.out",
         randomize=False,
         train=.8,
         trace=3):
    """
    Brill Tagger Demonstration

    @param num_sents: how many sentences of training and testing data to use
    @type num_sents: L{int}
    @param max_rules: maximum number of rule instances to create
    @type max_rules: L{int}
    @param min_score: the minimum score a rule must achieve in order to be considered
    @type min_score: L{int}
    @param error_output: the file where errors will be saved
    @type error_output: L{string}
    @param rule_output: the file where rules will be saved
    @type rule_output: L{string}
    @param randomize: whether the training data should be a random subset of the corpus
    @type randomize: L{boolean}
    @param train: the fraction of the corpus to be used for training (1=all)
    @type train: L{float}
    @param trace: the level of diagnostic tracing output to produce (0-3)
    @type trace: L{int}
    """

    import random

    from en.parser.nltk_lite.corpora import treebank
    from en.parser.nltk_lite import tag
    from en.parser.nltk_lite.tag import brill

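    # Baseline regexp tagger: numeric tokens get 'CD', everything else 'NN'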
    NN_CD_tagger = tag.Regexp([(r'^-?[0-9]+(\.[0-9]+)?$', 'CD'), (r'.*', 'NN')])

    # train is the proportion of data used in training; the rest is reserved
    # for testing.

    print "Loading tagged data..."
    sents = list(treebank.tagged())
    if randomize:
        random.seed(len(sents))
        random.shuffle(sents)

    tagged_data = [t for s in sents[:num_sents] for t in s]
    cutoff = int(len(tagged_data) * train)

    training_data = tagged_data[:cutoff]
    gold_data = tagged_data[cutoff:]

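    # Strip the gold tags so the test input contains only the words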
    testing_data = [t[0] for t in gold_data]

    # Unigram tagger

    print "Training unigram tagger:",
    u = tag.Unigram(backoff=NN_CD_tagger)

    # NB training and testing are required to use a list-of-lists structure,
    # so we wrap the flattened corpus data with the extra list structure.
    u.train([training_data])
    print("[accuracy: %f]" % tag.accuracy(u, [gold_data]))

    # Brill tagger

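    # Rule templates for the Brill trainer: each symmetric template proposes
    # rules conditioned on the tags (or words) within the given window on
    # either side of the target token, while the last two templates condition
    # on the preceding and following tag (or word) at the same time.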
    templates = [
        brill.SymmetricProximateTokensTemplate(brill.ProximateTagsRule,
                                               (1, 1)),
        brill.SymmetricProximateTokensTemplate(brill.ProximateTagsRule,
                                               (2, 2)),
        brill.SymmetricProximateTokensTemplate(brill.ProximateTagsRule,
                                               (1, 2)),
        brill.SymmetricProximateTokensTemplate(brill.ProximateTagsRule,
                                               (1, 3)),
        brill.SymmetricProximateTokensTemplate(brill.ProximateWordsRule,
                                               (1, 1)),
        brill.SymmetricProximateTokensTemplate(brill.ProximateWordsRule,
                                               (2, 2)),
        brill.SymmetricProximateTokensTemplate(brill.ProximateWordsRule,
                                               (1, 2)),
        brill.SymmetricProximateTokensTemplate(brill.ProximateWordsRule,
                                               (1, 3)),
        brill.ProximateTokensTemplate(brill.ProximateTagsRule, (-1, -1),
                                      (1, 1)),
        brill.ProximateTokensTemplate(brill.ProximateWordsRule, (-1, -1),
                                      (1, 1)),
    ]

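    # Starting from the unigram tagger's output, the Brill trainer greedily
    # learns up to max_rules transformation rules, keeping only rules that
    # score at least min_score.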
    #trainer = brill.FastBrillTrainer(u, templates, trace)
    trainer = brill.BrillTrainer(u, templates, trace)
    b = trainer.train(training_data, max_rules, min_score)

    print()
    print("Brill accuracy: %f" % tag.accuracy(b, [gold_data]))

    print("\nRules: ")
    printRules = open(rule_output, 'w')
    for rule in b.rules():
        print(str(rule))
        printRules.write(str(rule) + "\n\n")
    printRules.close()

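    # errorList is the error-report helper defined alongside this demo in the
    # original brill module; it describes tokens whose predicted tag differs
    # from the gold tag.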
    testing_data = list(b.tag(testing_data))
    el = errorList(gold_data, testing_data)
    errorFile = open(error_output, 'w')

    for e in el:
        errorFile.write(e + "\n\n")
    errorFile.close()
    print "Done; rules and errors saved to %s and %s." % (rule_output,
                                                          error_output)
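

# A minimal way to exercise the demo above, assuming the en.parser.nltk_lite
# package layout is importable on this system; the keyword values shown are
# just the defaults.  Because the second demo() definition shadows the first,
# this runs the Brill demonstration.
if __name__ == '__main__':
    demo(num_sents=100, max_rules=200, trace=3)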