main_h.py
import time

from functools import partial
from pprint import pprint

import cloud.serialization.cloudpickle as cp

from Classes.Sentences import Paragraphs, Sentences
from Features.example_features import *
from classifier.ErrorAnalysis import *
from classifier.loglinear.Loglinear import LoglinearModel
from utils.Utils import *


def main():
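    """Run the whole pipeline: read the training data, preprocess and
    subsample it, train a log-linear event-candidate classifier, evaluate it
    on a held-out split, and pickle the trained model."""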
    start = time.time()

    ### READ ###########################################################################################################
    print '\n------------'
    print 'Reading data'
    print '------------\n'

    all_train_sentences = Paragraphs("Dataset/Train/").all_sentences()

    ###
    read_end = time.time()
    print 'Reading time:', read_end - start, 's'
    ####################################################################################################################

    ### PREPROCESS #####################################################################################################
    print '\n------------------'
    print 'Preprocessing data'
    print '------------------\n'
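    # Hold the data out in three stages: keep a small random fraction of all
    # sentences, split those into train/test, and keep only a fraction of the
    # training tokens labelled 'None' (presumably to counter the heavy class
    # imbalance towards non-trigger tokens).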
    used_fraction = 0.005
    train_fraction = 0.8
    none_fraction = 0.05

    print 'Fraction of data used:', used_fraction
    print 'Fraction of data for training:', train_fraction
    print 'Fraction of None-labelled samples used:', none_fraction

    (used_sentences, _) = all_train_sentences.split_randomly(used_fraction)
    (train_sentences, test_sentences) = used_sentences.split_randomly(train_fraction)

    all_train_tokens = train_sentences.tokens()
    subsampled_tokens = subsample_none(all_train_tokens, none_fraction)
    print 'Number of training tokens:', len(subsampled_tokens)

    class_dict = get_class_dict(subsampled_tokens)
    stem_dict = get_stem_dict(subsampled_tokens)
    word_dict = get_word_dict(subsampled_tokens)
    ngram_order = 2
    char_ngram_dict = get_char_ngram_dict(subsampled_tokens, ngram_order)
    ngram_dict = get_ngram_dict(all_train_tokens, ngram_order)
    trigger_dict = get_trigger_dict(subsampled_tokens)
    arg_word_dict = get_arg_word_dict(subsampled_tokens)
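
    # Each *_dict above presumably maps an observed value (class label, stem,
    # word, character n-gram, trigger word, argument word) to an integer
    # index used by the feature templates; class_dict in particular maps each
    # label to its index (see class_dict['None'] below).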
    feature_strings = ['word_template_feature',
                       'word_class_template_feature',
                       'capital_letter_feature',
                       'token_in_trigger_dict_feature',
                       'number_in_token_feature',
                       'token_in_protein_feature',
                       'token_is_after_dash_feature',
                       'pos_class_feature',
                       'character_ngram_feature']
    phi = partial(set_of_features, stem_dict, word_dict, class_dict, trigger_dict, ngram_order, char_ngram_dict,
                  ngram_dict, feature_strings)
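    # partial() pre-binds the dictionaries and settings, so phi only needs the
    # remaining arguments (the token and whatever else set_of_features
    # expects) to produce a token's feature representation.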
    print 'Used features:', feature_strings

    ###
    preprocess_end = time.time()
    print 'Preprocessing time:', preprocess_end - read_end, 's'
    ####################################################################################################################

    ### TRAIN ##########################################################################################################
    print '\n-------------'
    print 'Training data'
    print '-------------\n'

    alpha = 0.2
    max_iterations = 10
    print 'Alpha =', alpha
    print 'Max iterations =', max_iterations
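    # Train the log-linear model: the lambda extracts the gold label from a
    # token, phi is the feature function, and class_dict.keys() is the label
    # set; alpha is presumably the learning rate and max_iterations caps the
    # number of training passes.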
    classifier = LoglinearModel(lambda t: t.event_candidate, phi, class_dict.keys(), alpha, max_iterations)\
        .train(subsampled_tokens)

    ###
    train_end = time.time()
    print 'Training time:', train_end - preprocess_end, 's'
    ####################################################################################################################

    ### TEST ###########################################################################################################
    print '\n-------'
    print 'Testing'
    print '-------\n'

    all_test_tokens = test_sentences.tokens()
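    # Unlike training, the test tokens are not subsampled, so the evaluation
    # reflects the natural label distribution (including 'None').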
    subsampled_test_tokens = all_test_tokens
    print 'Number of test tokens:', len(subsampled_test_tokens)

    predictions = classifier.predict_all(subsampled_test_tokens)

    ###
    predict_end = time.time()
    print 'Predict time:', predict_end - train_end, 's'
    ####################################################################################################################

    ### ERROR ANALYSIS #################################################################################################
    print '\n-----------------'
    print 'Analysing results'
    print '-----------------\n'

    true_labels = [token.event_candidate for token in all_test_tokens]

    for label in class_dict.keys():
        print 'Analyzing label: ', label
        precision_recall_f1(true_labels, predictions, label)

    y_test = true_labels
    y_pred = predictions

    # Compute our confusion matrix
    cm2 = confusion_matrix(class_dict, y_test, y_pred)
    pprint(cm2)

    none_index = class_dict['None']

    # class_dict maps labels to matrix indices; invert it rather than relying
    # on class_dict.keys() happening to be in index order.
    index_to_label = dict((index, label) for label, index in class_dict.items())
    for i in range(len(class_dict)):
        print '\nCLASS: ', index_to_label[i]
        print 'Recall: ', label_recall(cm2, i)
        print 'Precision: ', label_precision(cm2, i)
        print 'F1: ', label_f1(cm2, i)
    print '\n'
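    # Micro-averaged scores pool the confusion-matrix counts across classes
    # before computing precision/recall, so frequent classes dominate;
    # macro-averaged scores average the per-class figures, weighting all
    # classes equally. none_index is passed so the dominant 'None' class can
    # be treated separately (presumably excluded from the averages).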
    print 'Precision micro:', precision_micro(cm2, none_index)
    print 'Recall micro:', recall_micro(cm2, none_index)
    print 'F1 micro:', f1_micro(cm2, none_index)
    print '\n'
    print 'Precision macro:', precision_macro(cm2, none_index)
    print 'Recall macro:', recall_macro(cm2, none_index)
    print 'F1 macro:', f1_macro(cm2, none_index)

    ###
    analysis_end = time.time()
    print '\nAnalysis time:', analysis_end - predict_end, 's'
    ####################################################################################################################
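    # cloudpickle, unlike the standard pickle module, can serialize lambdas
    # and closures such as the label-extraction function held by the model.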
    cp.dump(classifier, open('classifier_' + time.strftime("%Y%m%d-%H%M%S") + '.p', 'wb'))


if __name__ == "__main__":
    main()