#!/usr/bin/env python
import inspect

import nltk
from nltk.corpus import brown
from nltk.tag import map_tag
from nltk.probability import ConditionalProbDist, ConditionalFreqDist, LidstoneProbDist

assert map_tag('brown', 'universal', 'NR-TL') == 'NOUN', '''
Brown-to-Universal POS tag map is out of date.'''
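
# map_tag converts Brown tags to the Universal tagset, e.g. 'NR-TL' -> 'NOUN'
# (and, for instance, 'VBD' -> 'VERB').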


class HMM:
    def __init__(self, train_data, test_data):
        """
        Initialise a new instance of the HMM.

        :param train_data: The training dataset, a list of sentences with tags
        :type train_data: list(list(tuple(str,str)))
        :param test_data: the test/evaluation dataset, a list of sentences with tags
        :type test_data: list(list(tuple(str,str)))
        """
        self.train_data = train_data
        self.test_data = test_data
        # Emission and transition probability distributions
        self.emission_PD: ConditionalProbDist = None
        self.transition_PD: ConditionalProbDist = None
        self.states = []
        self.viterbi = []
        self.backpointer = []

    # Compute the emission model using a ConditionalProbDist with a Lidstone
    # estimator: +0.01 added to the sample count for each bin, plus one extra
    # bin for unseen observations.
    def emission_model(self, train_data):
        """
        Compute an emission model using a ConditionalProbDist.

        :param train_data: The training dataset, a list of sentences with tags
        :type train_data: list(list(tuple(str,str)))
        :return: The emission probability distribution and a list of the states
        :rtype: Tuple[ConditionalProbDist, list(str)]
        """
        # Flatten train_data into a single list of (tag, word) pairs.
        # Lowercase each word, otherwise it mismatches the test data.
        data = []
        for x in train_data:
            data += [(tag, word.lower()) for (word, tag) in x]
        # Compute the emission model: condition on the tag, observe the word
        emission_FD = ConditionalFreqDist(data)
        # Lidstone: add 0.01 to each sample count, with one extra bin for unseen words
        lidstone_estimator = lambda fd: LidstoneProbDist(fd, 0.01, fd.B() + 1)
        self.emission_PD = ConditionalProbDist(emission_FD, lidstone_estimator)
        self.states = list(emission_FD.keys())
        return self.emission_PD, self.states
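
    # Illustrative usage (a sketch; exact values depend on the training split):
    #   emission_PD, states = model.emission_model(train_data)
    #   emission_PD['NOUN'].prob('fulton')     # non-zero even for rare words, thanks to smoothing
    #   emission_PD['NOUN'].logprob('fulton')  # base-2 log probability, always negative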

    # Compute the transition model using a ConditionalProbDist with the same
    # Lidstone estimator: +0.01 per bin, plus one extra bin.
    def transition_model(self, train_data):
        """
        Compute a transition model using a ConditionalProbDist.

        :param train_data: The training dataset, a list of sentences with tags
        :type train_data: list(list(tuple(str,str)))
        :return: The transition probability distribution
        :rtype: ConditionalProbDist
        """
        # The data object is a list of (tag_i, tag_i+1) pairs, one per adjacent
        # pair of tags, padded with the start symbol <s> and end symbol </s>.
        data = []
        for s in train_data:
            if not s:
                continue  # skip empty sentences
            data.append(("<s>", s[0][1]))
            for i in range(len(s) - 1):
                data.append((s[i][1], s[i + 1][1]))
            data.append((s[-1][1], "</s>"))
        # Compute the transition model
        transition_FD = ConditionalFreqDist(data)
        lidstone_estimator = lambda fd: LidstoneProbDist(fd, 0.01, fd.B() + 1)
        self.transition_PD = ConditionalProbDist(transition_FD, lidstone_estimator)
        return self.transition_PD
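
    # Illustrative usage (values depend on the split; see test_transition below):
    #   transition_PD = model.transition_model(train_data)
    #   transition_PD['<s>'].prob('NOUN')   # P(a sentence starts with NOUN)
    #   transition_PD['VERB'].prob('</s>')  # P(a sentence ends after VERB)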

    def test_emission(self):
        print("test emission")
        self.emission_model(self.train_data)
        # logprob returns negative values, so negate it to print a positive cost
        t1 = -self.emission_PD['NOUN'].logprob('fulton')
        print(t1)

    def test_transition(self):
        print("test transition")
        transition_PD = self.transition_model(self.train_data)
        t1 = -transition_PD['<s>'].logprob('NOUN')
        t2 = -transition_PD['VERB'].logprob('</s>')
        t3 = -transition_PD['NOUN'].logprob('VERB')
        print(t1, t2, t3)

    # Train the HMM
    def train(self):
        """
        Train the HMM on the training data.
        """
        self.emission_model(self.train_data)
        self.transition_model(self.train_data)

    # Part B: Implementing the Viterbi algorithm.

    # Initialise data structures for tagging a new sentence.
    # Uses the models stored in self.emission_PD and self.transition_PD.
    # Input: first word in the sentence to tag
    def initialise(self, observation):
        """
        Initialise data structures for tagging a new sentence.

        :param observation: the first word in the sentence to tag
        :type observation: str
        """
        # We work with base-2 log probabilities throughout: logprob returns
        # negative values, so maximising their sum is equivalent to minimising
        # the corresponding -log costs.
        # self.viterbi is a list of dictionaries forming a T*N table, indexed
        # as self.viterbi[time step][ending state] -> best log probability.
        self.viterbi = [{}]
        # self.backpointer maps each state to the best tag sequence ending in
        # that state (we store full paths rather than single back-links).
        self.backpointer = {}
        for state in self.states:
            # log P(state | <s>) + log P(observation | state)
            self.viterbi[0][state] = (self.transition_PD["<s>"].logprob(state)
                                      + self.emission_PD[state].logprob(observation))
            self.backpointer[state] = [state]
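
    # After model.initialise('the'), the structures look like this
    # (illustrative values only):
    #   self.viterbi[0]  == {'DET': -1.3, 'NOUN': -15.2, ...}
    #   self.backpointer == {'DET': ['DET'], 'NOUN': ['NOUN'], ...}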

    # Tag a new sentence using the trained model and already initialised data structures.
    # Uses the models stored in self.emission_PD and self.transition_PD, and
    # updates the self.viterbi and self.backpointer data structures.
    # Input: list of words
    def tag(self, observations):
        """
        Tag a new sentence using the trained model and already initialised data structures.

        :param observations: List of words (a sentence) to be tagged
        :type observations: list(str)
        :return: List of tags corresponding to each word of the input
        """
        for t in range(1, len(observations)):
            self.viterbi.append({})
            newbackpointer = {}  # maps each state to the best path ending in it
            for state in self.states:
                # logprob returns negative numbers, so the best predecessor is
                # the one that maximises the summed log probability
                (prob, prob_state) = max(
                    [(self.viterbi[t - 1][previous_state]
                      + self.transition_PD[previous_state].logprob(state)
                      + self.emission_PD[state].logprob(observations[t]), previous_state)
                     for previous_state in self.states])
                self.viterbi[t][state] = prob  # update the log probability
                # extend the best predecessor's path with the current state
                newbackpointer[state] = self.backpointer[prob_state] + [state]
            self.backpointer = newbackpointer
        # Termination: add the cost of the transition from each final state to </s>.
        for state in self.states:
            self.viterbi[len(observations) - 1][state] += self.transition_PD[state].logprob('</s>')
        # Choose the best final state, then return the tag sequence
        # corresponding to the best path as a list.
        (probability, best_state) = max(
            [(self.viterbi[len(observations) - 1][state], state) for state in self.states])
        tags = self.backpointer[best_state]
        return tags
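
    # Illustrative usage:
    #   s = 'the cat in the hat came back'.split()
    #   model.initialise(s[0])
    #   tags = model.tag(s)  # one tag per word, e.g. starting ['DET', 'NOUN', ...]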


def answer_question4b():
    """
    Report a tagged sequence that is incorrect.

    :rtype: str
    :return: your answer [max 280 chars]
    """
    # entries saves the first 10 incorrectly tagged sentences (gold versions);
    # incorrect saves the corresponding tag sequences produced by the model
    entries = []
    incorrect = []
    for sentence in test_data_universal:
        s = [word.lower() for (word, tag) in sentence]
        model.initialise(s[0])
        tags = model.tag(s)
        # collect sentences until we have 10 entries
        for ((word, gold), tag) in zip(sentence, tags):
            if tag != gold:
                incorrect.append(tags)
                entries.append(sentence)
                break
        if len(entries) >= 10:
            break
    tagged_sequence = incorrect[0]
    correct_sequence = entries[0]
    # Why do you think the tagger tagged this example incorrectly?
    answer = inspect.cleandoc(
        "'The Fulton County', if tagged correctly, should be DET NOUN NOUN, but the "
        "tagger returns DET ADJ NOUN. 'Fulton' is mistagged because the tagger leans "
        "heavily on the previous word's tag, whereas 'Fulton County' is a multi-word "
        "phrase in which both words are NOUN.")[0:280]
    return tagged_sequence, correct_sequence, answer


def answer_question5():
    """
    Suppose you have a hand-crafted grammar that has 100% coverage on
    constructions but less than 100% lexical coverage.
    How could you use a POS tagger to ensure that the grammar
    produces a parse for any well-formed sentence,
    even when it doesn't recognise the words within that sentence?

    :rtype: str
    :return: your answer [max 500 chars]
    """
    return inspect.cleandoc(
        "Using Lidstone smoothing and log probabilities, the model returns the tag "
        "that is most probable given the previous word's tag. When the tagger "
        "recognises a word, it probably does better, because the current state is "
        "informed by both the transition and the emission model; it may still err "
        "when the word is ambiguous between tags. Given an unrecognised word, this "
        "approach may do no better, since the emission model P(o|state) can no "
        "longer help us infer the state.")[0:500]


# Useful for testing
def isclose(a, b, rel_tol=1e-09, abs_tol=0.0):
    # http://stackoverflow.com/a/33024979
    return abs(a - b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)
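
# Example: isclose(0.1 + 0.2, 0.3) is True even though 0.1 + 0.2 != 0.3 exactly
# under IEEE-754 floating point.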


def answers():
    global tagged_sentences_universal, test_data_universal, \
        train_data_universal, model, test_size, train_size, ttags, \
        correct, incorrect, accuracy, \
        good_tags, bad_tags, answer4b, answer5

    # Load the Brown corpus with the Universal tag set.
    tagged_sentences_universal = brown.tagged_sents(categories='news', tagset='universal')

    # Divide the corpus into train and test data.
    test_size = 1000
    train_size = len(tagged_sentences_universal) - test_size
    test_data_universal = tagged_sentences_universal[:test_size]
    train_data_universal = tagged_sentences_universal[-train_size:]

    # Create an instance of the HMM class and initialise the training and test sets.
    model = HMM(train_data_universal, test_data_universal)

    # Train the HMM.
    model.train()

    # Inspect the model to see if emission_PD and transition_PD look plausible.
    print('states: %s\n' % model.states)
    # Add other checks
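    # A few lightweight sanity checks (a sketch; any correctly trained model
    # should pass them):
    if not isinstance(model.emission_PD, ConditionalProbDist):
        print('Error: emission_PD has the wrong type!')
    if not isinstance(model.transition_PD, ConditionalProbDist):
        print('Error: transition_PD has the wrong type!')
    if model.transition_PD['<s>'].prob('NOUN') <= 0:
        print('Error: smoothed probabilities should always be positive.')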

    ######
    # Try the model, and test its accuracy [won't do anything useful
    # until you've filled in the tag method]
    ######
    # self-designed tests
    # model.test_emission()
    # model.test_transition()
    s = 'the cat in the hat came back'.split()
    model.initialise(s[0])
    ttags = model.tag(s)
    print("Tag a trial sentence")
    print(list(zip(s, ttags)))

    # Check the model's accuracy (% correct) using the test set.
    correct = 0
    incorrect = 0
    for sentence in test_data_universal:
        s = [word.lower() for (word, tag) in sentence]
        model.initialise(s[0])
        tags = model.tag(s)
        for ((word, gold), tag) in zip(sentence, tags):
            if tag == gold:
                correct += 1
            else:
                incorrect += 1
    accuracy = correct / (correct + incorrect)  # about 0.8959 on this split
    print('Tagging accuracy for test set of %s sentences: %.4f' % (test_size, accuracy))

    # Print answers for 4b and 5
    bad_tags, good_tags, answer4b = answer_question4b()
    print('\nAn incorrectly tagged sequence is:')
    print(bad_tags)
    print('The correct tagging of this sentence would be:')
    print(good_tags)
    print('\nA possible reason why this error may have occurred is:')
    print(answer4b[:280])

    answer5 = answer_question5()
    print('\nFor Q5:')
    print(answer5[:500])


if __name__ == '__main__':
    answers()