Example #1
 def trainUniTnT(self):
     """train unigram and tnt seperatly without DefaultTagger"""
     self.split_into_folds()
     for k in range(1, (self.folds + 1)):
         train_sents = sum(self.foldlist[: (self.folds - 1)], [])
         tnt_tagger = tnt.TnT(N=100)
         tnt_tagger.train(train_sents)
         print(str(k) + " fold: tnt evaluated")
         unigram = UnigramTagger(train_sents)
         print(str(k) + " fold: unigram evaluated")
         to_tag = [untag(i) for i in self.foldlist[self.folds - 1]]
         self.tnt_tagged += tnt_tagger.tag_sents(to_tag)
         self.uni_tagged += unigram.tag_sents(to_tag)
         self.org_tagged += self.foldlist[self.folds - 1]
         self.foldlist = [self.foldlist[self.folds - 1]] + self.foldlist[: (self.folds - 1)]
     self.tnt = tnt_tagger
     self.unigram = unigram
     self.tnt_avg_acc = accuracy(sum(self.org_tagged, []), sum(self.tnt_tagged, []))
     self.uni_avg_acc = accuracy(sum(self.org_tagged, []), sum(self.uni_tagged, []))
     print("Accuracy of concatenated tnt-tagged sentences: ", self.tnt_avg_acc)
     print("Accuracy of concatenated unigram-tagged sentences: ", self.uni_avg_acc)
     (self.tnt_tagprecision, self.tnt_tagrecall) = self.tagprecision_recall(
         tnt_tagger, self.tnt_tagged, self.org_tagged
     )
     (self.unigram_tagprecision, self.unigram_tagrecall) = self.tagprecision_recall(
         unigram, self.uni_tagged, self.org_tagged
     )
     # reset the following values so that trainRegexp starts from the initial values
     self.org_tagged = []
     self.foldlist = []
     for i in range(1, self.folds + 1):
         self.foldlist.append(self.create_fold(i))
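The method above scores each tagger on the concatenation of all held-out folds. A minimal standalone sketch of the same rotation-and-concatenation idea, assuming the NLTK treebank sample is installed (fold count, corpus and tagger choice are illustrative here, not the project's):

from nltk.corpus import treebank
from nltk.metrics import accuracy
from nltk.tag import UnigramTagger, untag

sents = list(treebank.tagged_sents()[:300])
folds = 3
foldsize = len(sents) // folds
foldlist = [sents[i * foldsize:(i + 1) * foldsize] for i in range(folds)]

org_tagged, uni_tagged = [], []
for _ in range(folds):
    train_sents = sum(foldlist[: folds - 1], [])   # first folds-1 folds train the tagger
    test_fold = foldlist[folds - 1]                # the last fold is held out
    tagger = UnigramTagger(train_sents)
    uni_tagged += tagger.tag_sents([untag(s) for s in test_fold])
    org_tagged += test_fold
    foldlist = [foldlist[-1]] + foldlist[:-1]      # rotate so every fold is held out once
print("Unigram accuracy:", accuracy(sum(org_tagged, []), sum(uni_tagged, [])))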
Example #2
 def evaluate(self, tagged_gold_sents):
     # flatten the gold sentences to a plain token list, retag it, and
     # compare against the flattened gold (word, tag) pairs
     gold_tokens = []
     for sent in tagged_gold_sents:
         for tup in sent:
             gold_tokens.append(tup[0])
     test_tagged_tokens = self.tag(gold_tokens)
     gold_tagged_tokens = sum(tagged_gold_sents, [])
     print(accuracy(gold_tagged_tokens, test_tagged_tokens))
Example #3
def ocr_metrics(reference, result):
    """
    Function for return metrics about OCR tests, for validate our OCR service

    :param reference: Raw text, which is the text that we are testing
    :param result: Text after OCR
    :return: Accuracy and Precision metrics

    NLTK lib need SET object type to calculate Precision.
    For Accuracy, might be SET, but a List is better, because needs to be 2 lists with same length
    """

    # Import NLTK metric class
    from nltk.metrics import accuracy, precision

    # Return results after comparison
    if len(reference) == len(result):
        print("Accuracy: {0:.2f}%".format(accuracy(reference, result) * 100))
        print("Precision: {0:.2f}%".format(
            precision(set(reference), set(result)) * 100))
    else:
        print(
            "Cannot calculate accuracy: the texts do not have the same length."
            "\nExpected: {0} \nResult: {1}".format(len(reference),
                                                   len(result)))
        print("Precision: {0:.2f}%".format(
            precision(set(reference), set(result)) * 100))
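A toy illustration of the constraint described in the docstring, with made-up reference/result strings: accuracy() walks two equal-length sequences element by element, while precision() compares sets of items:

from nltk.metrics import accuracy, precision

reference = list("recognition")
result = list("recognltion")   # one simulated OCR error

print(accuracy(reference, result))             # per-character match rate
print(precision(set(reference), set(result)))  # overlap of the character sets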
Example #4
 def trainRegexp(self, backoff):
     self.split_into_folds()
     for k in range(1, (self.folds + 1)):
         train_sents = sum(self.foldlist[: (self.folds - 1)], [])
         if self.option_tone == "tonal" and self.option_tag == "Affixes":
             regex = RegexpTonalSA(backoff)
         if self.option_tone == "tonal" and self.option_tag == "POS":
             regex = RegexpTonal(backoff)
         if self.option_tone == "nontonal" and self.option_tag == "Affixes":
             regex = RegexpSA(backoff)
         if self.option_tone == "nontonal" and self.option_tag == "POS":
             regex = Regexp(backoff)
         to_tag = [untag(i) for i in self.foldlist[self.folds - 1]]
         self.regex_tagged += regex.tag_sents(to_tag)
         self.org_tagged += self.foldlist[self.folds - 1]
         self.foldlist = [self.foldlist[self.folds - 1]] + self.foldlist[: (self.folds - 1)]
     self.regex = regex
     self.regex_avg_acc = accuracy(sum(self.org_tagged, []), sum(self.regex_tagged, []))
     print("Accuracy of concatenated regexp-tagged sentences: ", self.regex_avg_acc)
     (self.regex_tagprecision, self.regex_tagrecall) = self.tagprecision_recall(
         regex, self.regex_tagged, self.org_tagged
     )
     self.org_tagged = []
     self.foldlist = []
     for i in range(1, self.folds + 1):
         self.foldlist.append(self.create_fold(i))
Example #5
def evaluate(self, gold):
    "overriding evaluate from nltk.TaggerI, it seems to have a bug"
    tagged_sents = [
        list(s) for s in self.tag_sents(untag(sent) for sent in gold)
    ]
    gold_tokens = sum(gold, [])
    test_tokens = sum(tagged_sents, [])
    return accuracy(gold_tokens, test_tokens)
Example #6
    def test(self, test_sequence, verbose=False, **kwargs):
        """
        Tests the HiddenMarkovModelTagger instance.

        :param test_sequence: a sequence of labeled test instances
        :type test_sequence: list(list)
        :param verbose: boolean flag indicating whether training should be
            verbose or include printed output
        :type verbose: bool
        """
        def words(sent):
            return [word for (word, tag) in sent]

        def tags(sent):
            return [tag for (word, tag) in sent]

        def flatten(seq):
            return list(itertools.chain(*seq))

        test_sequence = self._transform(test_sequence)
        predicted_sequence = list(map(self._tag, map(words, test_sequence)))

        if verbose:
            for test_sent, predicted_sent in zip(test_sequence,
                                                 predicted_sequence):
                print(
                    "Test:",
                    " ".join("%s/%s" % (token, tag)
                             for (token, tag) in test_sent),
                )
                print()
                print("Untagged:",
                      " ".join("%s" % token for (token, tag) in test_sent))
                print()
                print(
                    "HMM-tagged:",
                    " ".join("%s/%s" % (token, tag)
                             for (token, tag) in predicted_sent),
                )
                print()
                print(
                    "Entropy:",
                    self.entropy([(token, None)
                                  for (token, tag) in predicted_sent]),
                )
                print()
                print("-" * 60)

        test_tags = flatten(map(tags, test_sequence))
        predicted_tags = flatten(map(tags, predicted_sequence))

        acc = accuracy(test_tags, predicted_tags)
        count = sum(len(sent) for sent in test_sequence)
        print("accuracy over %d tokens: %.2f" % (count, acc * 100))
Example #7
 def evaluate(self, gold):
     '''
     Score the accuracy of the tagger against the gold standard.
     Strip the tags from the gold standard text, retag it using
     the tagger, then compute the accuracy score.
     :param gold: the gold-standard tagged sentences
     :return: accuracy score
     '''
     tagged_sents = self.tag_sents(untag(sent) for sent in gold)
     gold_tokens = sum(gold, [])
     test_tokens = sum(tagged_sents, [])
     return accuracy(gold_tokens, test_tokens)
Example #8
    def test(self, test_sequence, **kwargs):
        """
        Tests the HiddenMarkovModelTagger instance.

        :param test_sequence: a sequence of labeled test instances
        :type test_sequence: list(list)
        :param verbose: boolean flag indicating whether training should be
            verbose or include printed output
        :type verbose: bool
        """

        def words(sent):
            return [word for (word, tag) in sent]

        def tags(sent):
            return [tag for (word, tag) in sent]

        test_sequence = LazyMap(self._transform.transform, test_sequence)
        predicted_sequence = LazyMap(self._tag, LazyMap(words, test_sequence))

        if kwargs.get('verbose', False):
            # This will be used again later for accuracy so there's no sense
            # in tagging it twice.
            test_sequence = list(test_sequence)
            predicted_sequence = list(predicted_sequence)

            for test_sent, predicted_sent in zip(test_sequence,
                                                 predicted_sequence):
                print 'Test:', \
                    ' '.join(['%s/%s' % (str(token), str(tag))
                              for (token, tag) in test_sent])
                print
                print 'Untagged:', \
                    ' '.join([str(token) for (token, tag) in test_sent])
                print
                print 'HMM-tagged:', \
                    ' '.join(['%s/%s' % (str(token), str(tag))
                              for (token, tag) in predicted_sent])
                print
                print 'Entropy:', \
                    self.entropy([(token, None) for
                                  (token, tag) in predicted_sent])
                print
                print '-' * 60

        test_tags = LazyConcatenation(LazyMap(tags, test_sequence))
        predicted_tags = LazyConcatenation(LazyMap(tags, predicted_sequence))

        acc = accuracy(test_tags, predicted_tags)

        count = sum([len(sent) for sent in test_sequence])

        print 'accuracy over %d tokens: %.2f' % (count, acc * 100)
Example #9
    def test(self, test_sequence, **kwargs):
        """
        Tests the HiddenMarkovModelTagger instance.

        :param test_sequence: a sequence of labeled test instances
        :type test_sequence: list(list)
        :param verbose: boolean flag indicating whether training should be
            verbose or include printed output
        :type verbose: bool
        """

        def words(sent):
            return [word for (word, tag) in sent]

        def tags(sent):
            return [tag for (word, tag) in sent]

        test_sequence = LazyMap(self._transform.transform, test_sequence)
        predicted_sequence = LazyMap(self._tag, LazyMap(words, test_sequence))

        if kwargs.get('verbose', False):
            # This will be used again later for accuracy so there's no sense
            # in tagging it twice.
            test_sequence = list(test_sequence)
            predicted_sequence = list(predicted_sequence)

            for test_sent, predicted_sent in zip(test_sequence,
                                                 predicted_sequence):
                print 'Test:', \
                    ' '.join(['%s/%s' % (str(token), str(tag))
                              for (token, tag) in test_sent])
                print
                print 'Untagged:', \
                    ' '.join([str(token) for (token, tag) in test_sent])
                print
                print 'HMM-tagged:', \
                    ' '.join(['%s/%s' % (str(token), str(tag))
                              for (token, tag) in predicted_sent])
                print
                print 'Entropy:', \
                    self.entropy([(token, None) for
                                  (token, tag) in predicted_sent])
                print
                print '-' * 60

        test_tags = LazyConcatenation(LazyMap(tags, test_sequence))
        predicted_tags = LazyConcatenation(LazyMap(tags, predicted_sequence))

        acc = accuracy(test_tags, predicted_tags)

        count = sum([len(sent) for sent in test_sequence])

        print 'accuracy over %d tokens: %.2f' % (count, acc * 100)
Example #10
    def test( self, test_sequence, verbose = False, **kwargs ):
        """
        Tests the HiddenMarkovModelTagger instance.

        :param test_sequence: a sequence of labeled test instances
        :type test_sequence: list(list)
        :param verbose: boolean flag indicating whether training should be
            verbose or include printed output
        :type verbose: bool
        """


        def words( sent ):
            return [ word for (word, tag) in sent ]


        def tags( sent ):
            return [ tag for (word, tag) in sent ]


        def flatten( seq ):
            return list( itertools.chain( *seq ) )


        test_sequence = self._transform( test_sequence )
        predicted_sequence = list( imap( self._tag, imap( words, test_sequence ) ) )

        if verbose:
            for test_sent, predicted_sent in izip( test_sequence, predicted_sequence ):
                print( 'Test:',
                       ' '.join( '%s/%s' % (token, tag)
                                 for (token, tag) in test_sent ) )
                print( )
                print( 'Untagged:',
                       ' '.join( "%s" % token for (token, tag) in test_sent ) )
                print( )
                print( 'HMM-tagged:',
                       ' '.join( '%s/%s' % (token, tag)
                                 for (token, tag) in predicted_sent ) )
                print( )
                print( 'Entropy:',
                       self.entropy( [ (token, None) for
                                       (token, tag) in predicted_sent ] ) )
                print( )
                print( '-' * 60 )

        test_tags = flatten( imap( tags, test_sequence ) )
        predicted_tags = flatten( imap( tags, predicted_sequence ) )

        acc = accuracy( test_tags, predicted_tags )
        count = sum( len( sent ) for sent in test_sequence )
        print( 'accuracy over %d tokens: %.2f' % (count, acc * 100) )
Example #11
    def evaluate(self, gold):
        # Convert nltk.Tree chunked sentences to (word, pos, iob) triplets
        chunked_sents = [tree2conlltags(sent) for sent in gold]

        # Convert (word, pos, iob) triplets to tagged tuples ((word, pos), iob)
        chunked_sents = [triplets2tagged_pairs(sent) for sent in chunked_sents]

        print(chunked_sents)

        dataset = self.tagger._todataset(chunked_sents)
        featuresets, tags = zip(*dataset)
        predicted_tags = self.tagger.classifier().classify_many(featuresets)
        return accuracy(tags, predicted_tags)
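The classify_many()-plus-accuracy pattern above works for any NLTK classifier; a tiny self-contained sketch with made-up feature sets:

from nltk.classify import NaiveBayesClassifier
from nltk.metrics import accuracy

train = [({"last_letter": "a"}, "female"), ({"last_letter": "k"}, "male")]
test = [({"last_letter": "a"}, "female"), ({"last_letter": "o"}, "male")]

clf = NaiveBayesClassifier.train(train)
featuresets, tags = zip(*test)
predicted_tags = clf.classify_many(featuresets)
print(accuracy(tags, predicted_tags))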
Example #12
def main():
    #X_test = TideneIterCSVClass(PATH+teste)
    #X_train = TideneIterCSVClass(PATH+treinamento)

    X_train = TideneIterCSVTaggingExtraction(PATH + treinamento)
    X_test = TideneIterCSVTaggingExtraction(PATH + teste)

    Y_test = pd.read_csv(os.path.join(os.path.dirname(__file__), PATH + teste),
                         header=0,
                         delimiter=";",
                         usecols=["section"],
                         quoting=3)

    Y_train = pd.read_csv(os.path.join(os.path.dirname(__file__),
                                       PATH + treinamento),
                          header=0,
                          delimiter=";",
                          usecols=["section"],
                          quoting=3)

    # Statistics: class distribution in both sets
    sections = ["A", "B", "C", "D", "E", "F", "G", "H"]
    print("Conjunto de treinamento ...")
    result = [(x, Y_train['section'].tolist().count(x)) for x in sections]
    print(result)

    print("Conjunto de teste ...")
    result = [(x, Y_test['section'].tolist().count(x)) for x in sections]
    print(result)

    from sklearn.feature_extraction.text import TfidfTransformer
    from sklearn.feature_extraction.text import TfidfVectorizer
    from sklearn.naive_bayes import MultinomialNB
    from sklearn.metrics import confusion_matrix

    tfidf_transformer = TfidfVectorizer()

    #------------ SVC test ---------------------
    X_train = TideneIterCSVTaggingExtraction(PATH + treinamento)
    X_test = TideneIterCSVTaggingExtraction(PATH + teste)
    clf = LinearSVC(C=0.177828).fit(tfidf_transformer.fit_transform(X_train),
                                    Y_train['section'].tolist())

    predict = clf.predict(tfidf_transformer.transform(X_test))

    print(accuracy(Y_test['section'].tolist(), predict))

    cm = confusion_matrix(Y_test['section'].tolist(), predict, labels=sections)
    print(cm)
Example #13
    def evaluate(self, gold):
        """
        Score the accuracy of the tagger against the gold standard.
        Strip the tags from the gold standard text, retag it using
        the tagger, then compute the accuracy score.

        :type gold: list(list(tuple(str, str)))
        :param gold: The list of tagged sentences to score the tagger on.
        :rtype: float
        """

        tagged_sents = self.tag_sents(untag(sent) for sent in gold)
        gold_tokens = sum(gold, [])
        test_tokens = sum(tagged_sents, [])
        return accuracy(gold_tokens, test_tokens)
Example #14
    def evaluate(self, gold):
        """
        Score the accuracy of the tagger against the gold standard.
        Strip the tags from the gold standard text, retag it using
        the tagger, then compute the accuracy score.

        :type gold: list(list(tuple(str, str)))
        :param gold: The list of tagged sentences to score the tagger on.
        :rtype: float
        """

        tagged_sents = self.tag_sents(untag(sent) for sent in gold)
        gold_tokens = sum(gold, [])
        test_tokens = sum(tagged_sents, [])
        return accuracy(gold_tokens, test_tokens)
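Examples #13 and #14 are the stock nltk.tag.TaggerI.evaluate() (newer NLTK releases expose the same thing as accuracy()). A short usage sketch, again assuming the treebank sample and with arbitrary split indices:

from nltk.corpus import treebank
from nltk.tag import UnigramTagger

gold = treebank.tagged_sents()[3000:3100]
tagger = UnigramTagger(treebank.tagged_sents()[:3000])
print(tagger.evaluate(gold))   # fraction of correctly tagged (word, tag) pairs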
Example #15
def evaluate(reference, pred):
    print("%20s%20s" % ("Predictions:", pred))
    print("\nEVALUATION")
    print("Accuracy  = ", accuracy(reference, pred))
    #pos_ref = [ref for ref in reference if ref==1]
    true_pos = [pred[i] for i in range(len(pred)) if pred[i] == reference[i] == 1]
    false_pos = [pred[i] for i in range(len(pred)) if pred[i] == 1 and reference[i] == 0]
    #true_neg = [pred[i] for i in range(len(pred)) if pred[i] == reference[i] == 0]
    false_neg = [pred[i] for i in range(len(pred)) if pred[i] == 0 and reference[i] == 1]
    # precision = TP / (TP + FP), recall = TP / (TP + FN)
    precision = float(len(true_pos)) / (len(true_pos) + len(false_pos))
    recall = float(len(true_pos)) / (len(true_pos) + len(false_neg))
    f_measure = float(2 * precision * recall) / (precision + recall)

    print("Precision = ", precision)
    print("Recall    = ", recall)
    print("F_measure = ", f_measure)
Example #16
def evaluate(taggers, testing_data='en-ud-dev.conllu'):
    from nltk.metrics import accuracy
    from itertools import chain
    if isinstance(taggers, str):
        import pickle
        import os
        with open(taggers, "rb") as fo:
            taggers = pickle.load(fo)
    if isinstance(testing_data, str):
        from corpkit.conll import path_to_df
        testing_data = path_to_df(testing_data)
    for feat, tagger in taggers.items():
        test_bit = testing_data[['w', feat, 'x']]
        untagged = [
            d.astype(object).values.tolist()
            for x, d in test_bit[['w', 's']].groupby('s')
        ]
        retagged_sents = tagger.tag_sents(untagged)
        gold_tokens = [tuple(i) for i in test_bit[['w', feat]].values.tolist()]
        test_tokens = list(chain(*retagged_sents))
        print('%s Accuracy: ' % feat, accuracy(gold_tokens, test_tokens))
Example #17
File: 6-7.py Project: jbbe/lang
    def evaluate(self, gold):
        """
        Score the accuracy of the tagger against the gold standard.
        Strip the tags from the gold standard text, retag it using
        the tagger, then compute the accuracy score.

        :type gold: list(list(tuple(str, str)))
        :param gold: The list of tagged sentences to score the tagger on.
        :rtype: float
        """

        tagged_posts = self.tag(nltk.tag.untag(gold))
        # gold_tokens = list(chain(*gold))
        gold_tags = [t for (w, t) in gold]
        test_tags = [t for (w, t) in tagged_posts]
        # print(len(gold_tags), len(test_tags))
        # print(test_tags)
        # test_tokens = list(chain(*tagged_posts))
        # print(len(tagged_posts))
        # , len(gold_tokens), len(test_tokens))
        return accuracy(gold_tags, test_tags)
Example #18
obtained_class = []

for row in reader:
    print(row)
    title, category = row
    wiki_page = wikipedia.page(title)
    wiki_content = str.lower(wiki_page.content)
    tech_count = wiki_content.count("technology")
    politics_count = wiki_content.count("politics")
    business_count = wiki_content.count("business")
    travel_count = wiki_content.count("travel")
    max_count = max(tech_count, politics_count, business_count, travel_count)

    class_atr = "politics"

    if max_count == travel_count:
        class_atr = "travel"
    if max_count == tech_count:
        class_atr = "technology"
    if max_count == business_count:
        class_atr = "business"

    print("Actual Class : " + category)
    print("Obtained Class : " + class_atr)
    actual_class.append(category.strip())
    obtained_class.append(class_atr)

accuracy_baseline = accuracy(obtained_class, actual_class) * 100

print('Accuracy of baseline : ' + str(accuracy_baseline) + "%")
Example #19
 def evaluate(self, gold):
     tagged_sents = self.tag_sents(untag(sent) for sent in gold)
     gold_tokens = list(itertools.chain(*gold))
     print(json.dumps(gold_tokens))
     print(len(tagged_sents), len(gold_tokens))
     test_tokens = list(itertools.chain(*tagged_sents))
     return accuracy(gold_tokens, test_tokens)
Example #20
        print("Running on dev")
        test_data = [json.loads(line) for line in open_file("twt.dev.json")]
    else:
        print("Running on test")
        test_data = [json.loads(line) for line in open_file("twt.test.json")]
    test_data = handle_lowfreq_words(vocab)(test_data)
    twitter_model = hmm.HiddenMarkovModelTagger(symbols=hmm_model.symbols,
                                                states=tagset,
                                                transitions=transition_model,
                                                outputs=emission_model,
                                                priors=init_model)

    # Compute the accuracy - we can call this, but then we just do extra decoding
    # work. What we really need is just call nltk.metrics.accuracy on the gold and
    # predicted.
    # twitter_model.test( test_data )

    # Compute the confusion matrix, technically we would be doing this twice, as
    # when computing accuracy we would've already done this. It would be more
    # optimal to modify the hmm library. But meh.
    gold = tag_list(test_data)
    unlabeled_data = LazyMap(unlabeled_words, test_data)
    predicted_labels = list(LazyMap(twitter_model.tag, unlabeled_data))
    predicted = tag_list(predicted_labels)

    acc = accuracy(gold, predicted)
    print("Accuracy: ", acc)
    cm = ConfusionMatrix(gold, predicted)
    print(cm.pretty_format(sort_by_count=True, show_percents=True,
                           truncate=25))
Example #21
 def evaluate_test(self):
     return accuracy(self.test_original, self.test)
Example #22
	def evaluate(self, gold):
		tagged_sents = self.tag_sents(([word[:-1] for word in sentence] for sentence in gold))
		return accuracy(sum(gold, []), sum(tagged_sents, []))
Example #23
 def evaluate(self, gold):
     tagged_sents = self.tag_sents(untag(sent) for sent in gold)
     gold_tokens = list(itertools.chain(*gold))
     test_tokens = list(itertools.chain(*tagged_sents))
     return accuracy(gold_tokens, test_tokens)
Example #24
from __future__ import print_function
from nltk.metrics import accuracy, precision, recall, f_measure

training = 'PERSON OTHER PERSON OTHER OTHER ORGANIZATION'.split()
testing = 'PERSON OTHER OTHER OTHER OTHER OTHER'.split()

print(accuracy(training, testing))

trainset = set(training)
testset = set(testing)
print(precision(trainset, testset))  # precision: |ref ∩ test| / |test|
print(recall(trainset, testset))  # recall: |ref ∩ test| / |ref|
print(f_measure(trainset, testset))

# Edit distance: a copy costs 0; substitution, deletion and insertion each cost 1
from nltk.metrics import edit_distance
print(edit_distance('relate', 'relation'))
print(edit_distance('suggestion', 'calculation'))

# Similarity via the Jaccard measure: jaccard_distance returns 1 - |X ∩ Y| / |X ∪ Y|
from nltk.metrics import jaccard_distance
X = set([10, 20, 30, 40])
Y = set([20, 30, 60])
print(jaccard_distance(X, Y))

# Binary distance: returns 0.0 if the two labels are identical, otherwise 1.0
from nltk.metrics import binary_distance
print(binary_distance(X, Y))

# Masi distance, for items that carry multiple labels
from nltk.metrics import masi_distance
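A one-line sketch of calling masi_distance on the same X and Y sets used above:

print(masi_distance(X, Y))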
Example #25
 def evaluate(self, gold):
     dataset = self._todataset(gold)
     featuresets, tags = zip(*dataset)
     predicted_tags = self.classifier().classify_many(featuresets)
     return accuracy(tags, predicted_tags)
Example #26
for dictionary in dictionaries:

    # from LexiconClassifier library
    classifier = Classifier(dictionary)

    # build the train and test set
    word_vector = negative_words + positive_words
    gold_standard = [-1 for i in range(len(negative_words))
                     ] + [1 for i in range(len(positive_words))]
    results = [classifier.classify(s) for s in word_vector]

    # print the classification results
    print 'Dictionary : ', dictionary.get_name(), '\n'
    print ConfusionMatrix(gold_standard, results).pp()
    print 'Accuracy: ', accuracy(gold_standard, results)
    for c in [0, 1, -1]:
        print 'Metrics for class ', c
        gold = set()
        test = set()
        for i, x in enumerate(gold_standard):
            if x == c:
                gold.add(i)
        for i, x in enumerate(results):
            if x == c:
                test.add(i)
        print 'Precision: ', precision(gold, test)
        print 'Recall   : ', recall(gold, test)
        print 'F_measure: ', f_measure(gold, test)
    print '\n\n'
Example #27
get_sent = lambda x: list(dict['id'][x])
pn['sent'] = pn['words'].apply(get_sent)  # quite slow: map each word sequence to its id form

maxlen = 50

print("Pad sequences (samples x time)")
pn['sent'] = list(sequence.pad_sequences(pn['sent'], maxlen=maxlen))

x = np.array(list(pn['sent']))[::2]  # training set: [::2] takes every second row; list(pn['sent']) turns the Series into a plain list
y = np.array(list(pn['mark']))[::2]
xt = np.array(list(pn['sent']))[1::2]  # test set: [1::2] starts from row offset 1
yt = np.array(list(pn['mark']))[1::2]
xa = np.array(list(pn['sent']))  # full set
ya = np.array(list(pn['mark']))

print('Build model...')
model = Sequential()
model.add(Embedding(len(dict)+1, 256))
model.add(LSTM(128)) # try using a GRU instead, for fun
model.add(Dropout(0.5))
model.add(Dense(1))
model.add(Activation('sigmoid'))

model.compile(loss='binary_crossentropy', optimizer='adam')

model.fit(x, y, batch_size=16, nb_epoch=10)  # training takes several hours

classes = model.predict_classes(xt)
acc = accuracy(classes, yt)
# acc = np_utils.accuracy(classes, yt)
print('Test accuracy:', acc)
Example #28
url =  urls[2]
train_comments_with_tag = tag_comment(comment, url)
a,b,c = probability_factor_script_interval_relation(url, comment)
like_cnt = int((len(train_comments_with_tag)*0.05))
train_end = int(like_cnt+(len(train_comments_with_tag)-like_cnt)*0.8)
#test_comment_with_tag =tag_comment(load_comment_data_test()[2], urls[2])
#untagged_test_comments = untag(test_comment_with_tag)

classifier = EvaluateFactorClass(train_comments_with_tag[:int(like_cnt*0.8)]
                                 +train_comments_with_tag[like_cnt:train_end]
                                 , url)

classify_result =classifier.tag(untag(train_comments_with_tag[int(like_cnt*0.8):like_cnt]
                                      +train_comments_with_tag[train_end:]))

print(accuracy(train_comments_with_tag[int(like_cnt*0.8):like_cnt]
                                      +train_comments_with_tag[train_end:], classify_result) )
'''
classifier = EvaluateFactorClass(train_comments_with_tag[:60]
                                 +train_comments_with_tag[75:275]
                                 , urls[8])

classify_result =classifier.tag(untag(train_comments_with_tag[60:75]
                                      +train_comments_with_tag[275:315]))

print(accuracy(train_comments_with_tag[60:75]
                                      +train_comments_with_tag[275:315], classify_result) )
'''
print("up to %d is liked" % like_cnt)
for i in range(len(classify_result)):
    if(classify_result[i][1]==1):    
        print(i)
Example #29
 def evaluate(self, gold):
     tagged_sents = self.tag_sents(
         ([word[:-1] for word in sentence] for sentence in gold))
     return accuracy(sum(gold, []), sum(tagged_sents, []))
Example #30
#script to validate coding
import cPickle as pickle
import sys
from nltk.metrics import accuracy, ConfusionMatrix, precision, recall, f_measure
from collections import defaultdict
import classifier

if __name__=='__main__':
	validation_pickle=sys.argv[1]
	classifier_pickle=sys.argv[2]
	validation_set=pickle.load(open(validation_pickle, 'rb'))
	c=pickle.load(open(classifier_pickle, 'rb'))
	
	reference=defaultdict(set)
	observed=defaultdict(set)
	# accuracy needs two parallel label sequences, so keep plain lists
	# alongside the per-label index sets used for precision/recall below
	reference_labels=[]
	observed_labels=[]
	for i, (tweet, label) in enumerate(validation_set):
		reference[label].add(i)
		reference_labels.append(label)
		observation=c.classify(tweet)
		observed[observation].add(i)
		observed_labels.append(observation)
	
	print "accuracy: %s" % accuracy(reference_labels, observed_labels)
	print "pos precision: %s" % precision(reference['positive'], observed['positive'])
	print "pos recall: %s" % recall(reference['positive'], observed['positive'])
	print "pos f-measure: %s" % f_measure(reference['positive'], observed['positive'])
	print "neg precision: %s" % precision(reference['negative'], observed['negative'])
	print "neg recall: %s" % recall(reference['negative'], observed['negative'])
	print "neg f-measure: %s" % f_measure(reference['negative'], observed['negative'])
	
Example #31
    def evaluate(self, gold):

        tagged_sents = self.tag_sents(untag(sent) for sent in gold)
        gold_tokens = list(chain(*gold))
        test_tokens = list(chain(*tagged_sents))
        return accuracy(gold_tokens, test_tokens)

Example #32
for dictionary in dictionaries:

    # from LexiconClassifier library
    classifier = Classifier(dictionary)

    # build the train and test set
    word_vector = negative_words + positive_words
    gold_standard = [-1 for i in range(len(negative_words))] + [1 for i in range(len(positive_words))]
    results = [classifier.classify(s) for s in word_vector]

    # print the classification results
    print 'Dictionary : ', dictionary.get_name(), '\n'
    print ConfusionMatrix(gold_standard,results).pp()
    print 'Accuracy: ', accuracy(gold_standard,results)
    for c in [0,1,-1]:
        print 'Metrics for class ', c
        gold = set()
        test = set()
        for i,x in enumerate(gold_standard):
            if x == c:
                gold.add(i)
        for i,x in enumerate(results):
            if x == c:
                test.add(i)
        print 'Precision: ', precision(gold, test)
        print 'Recall   : ', recall(gold, test)
        print 'F_measure: ', f_measure(gold, test)
    print '\n\n'
Example #33
 def trainALL(self, last):
     self.split_into_folds()
     for k in range(1, (self.folds + 1)):
         train_sents = sum(self.foldlist[: (self.folds - 1)], [])
         crf = CRFTagger(training_opt={"max_iterations": 100, "max_linesearch": 10, "c1": 0.0001, "c2": 1.0})
         crf_trained = crf.train(
             train_sents,
             "Models/model.crfCrossValidation1" + str(k) + self.option_tone + self.option_tag + ".tagger",
         )
         print(str(k) + " fold: crf")
         tnt_tagger = tnt.TnT(unk=DefaultTagger("n"), Trained=True, N=100)
         tnt_tagger.train(train_sents)
         print(str(k) + " fold: tnt")
         tag_set = set()
         symbols = set()
         for i in train_sents:
             for j in i:
                 tag_set.add(j[1])
                 symbols.add(j[0])
         trainer = HiddenMarkovModelTrainer(list(tag_set), list(symbols))
         hmm = trainer.train_supervised(train_sents, estimator=lambda fd, bins: LidstoneProbDist(fd, 0.1, bins))
         print(str(k) + " fold: hmm")
         if last == "U":
             lasttagger = UnigramTagger(train_sents, backoff=DefaultTagger("n"))
             print(str(k) + " fold: unigram")
         if last == "B":
             if self.option_tone == "tonal" and self.option_tag == "Affixes":
                 regex = RegexpTonalSA(DefaultTagger("n"))
             if self.option_tone == "tonal" and self.option_tag == "POS":
                 regex = RegexpTonal(DefaultTagger("n"))
             if self.option_tone == "nontonal" and self.option_tag == "Affixes":
                 regex = RegexpSA(DefaultTagger("n"))
             if self.option_tone == "nontonal" and self.option_tag == "POS":
                 regex = Regexp(DefaultTagger("n"))
             dic = dictionary_backoff(self.option_tone, regex)
             affix = AffixTagger(train_sents, min_stem_length=0, affix_length=-4, backoff=dic)
             lasttagger = BigramTagger(train_sents, backoff=affix)
             print(str(k) + " fold: bigram")
         to_tag = [untag(i) for i in self.foldlist[self.folds - 1]]
         self.crf_tagged += crf.tag_sents(to_tag)
         self.tnt_tagged += tnt_tagger.tag_sents(to_tag)
         self.hmm_tagged += hmm.tag_sents(to_tag)
         self.lasttagger_tagged += lasttagger.tag_sents(to_tag)
         self.org_tagged += self.foldlist[self.folds - 1]
         self.foldlist = [self.foldlist[self.folds - 1]] + self.foldlist[: (self.folds - 1)]
     self.crf = crf
     self.tnt = tnt_tagger
     self.hmm = hmm
     self.lasttagger = lasttagger
     org_words = sum(self.org_tagged, [])
     self.crf_avg_acc = accuracy(org_words, sum(self.crf_tagged, []))
     self.tnt_avg_acc = accuracy(org_words, sum(self.tnt_tagged, []))
     self.hmm_avg_acc = accuracy(org_words, sum(self.hmm_tagged, []))
     self.lasttagger_avg_acc = accuracy(org_words, sum(self.lasttagger_tagged, []))
     print("Accuracy of concatenated crf-tagged sentences: ", self.crf_avg_acc)
     print("Accuracy of concatenated tnt-tagged sentences: ", self.tnt_avg_acc)
     print("Accuracy of concatenated hmm-tagged sentences: ", self.hmm_avg_acc)
     print("Accuracy of concatenated " + last + "-tagged sentences: ", self.lasttagger_avg_acc)
     (self.crf_tagprecision, self.crf_tagrecall) = self.tagprecision_recall(crf, self.crf_tagged, self.org_tagged)
     (self.tnt_tagprecision, self.tnt_tagrecall) = self.tagprecision_recall(
         tnt_tagger, self.tnt_tagged, self.org_tagged
     )
     (self.hmm_tagprecision, self.hmm_tagrecall) = self.tagprecision_recall(hmm, self.hmm_tagged, self.org_tagged)
     (self.lasttagger_tagprecision, self.lasttagger_tagrecall) = self.tagprecision_recall(
         lasttagger, self.lasttagger_tagged, self.org_tagged
     )
     self.org_tagged = []
     self.foldlist = []
     for i in range(1, self.folds + 1):
         self.foldlist.append(self.create_fold(i))
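Several of the methods above report per-tag scores through a project-specific tagprecision_recall() helper. A generic sketch of that kind of computation, using nltk.metrics.precision and recall on index sets (an illustration, not the project's implementation):

from nltk.metrics import precision, recall

def per_tag_scores(gold_tokens, test_tokens):
    # gold_tokens and test_tokens are parallel, flattened lists of (word, tag) pairs
    scores = {}
    for tag in {t for (_, t) in gold_tokens}:
        ref = {i for i, (_, t) in enumerate(gold_tokens) if t == tag}
        test = {i for i, (_, t) in enumerate(test_tokens) if t == tag}
        scores[tag] = (precision(ref, test), recall(ref, test))
    return scores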