# forked from nOkuda/activetm
# submain.py -- 148 lines (134 loc), 5.77 KB
"""Code to run experiment"""
import argparse
import datetime
import os
import pickle
import random
import socket
import time
import numpy as np
from activetm.active import evaluate
from activetm.active import select
from activetm import models
from activetm import utils
def partition_data_ids(num_docs, rng, settings):
    """Split document ids into test, initially-labeled, and unlabeled pools.

    The ids 0..num_docs-1 are shuffled in place with *rng*; the first
    settings['testsize'] ids form the test set, the next
    settings['startlabeled'] ids form the labeled seed set, and everything
    after that becomes the unlabeled pool.

    Returns:
        (test_doc_ids, labeled_doc_ids, unlabeled_doc_ids) where the first
        two are lists and the last is a set (cheap membership removal later).
    """
    test_size = int(settings['testsize'])
    seed_size = int(settings['startlabeled'])
    order = list(range(num_docs))
    rng.shuffle(order)
    cut = test_size + seed_size
    # Slicing a list already yields a new list, so no extra copy is needed.
    test_doc_ids = order[:test_size]
    labeled_doc_ids = order[test_size:cut]
    unlabeled_doc_ids = set(order[cut:])
    return test_doc_ids, labeled_doc_ids, unlabeled_doc_ids
def _run():
    """Run a single ActiveTM active-learning experiment.

    Command-line arguments:
        settings   path to a settings file (see README.md in the ActiveTM root)
        outputdir  directory for output
        label      identifying label for this run
        seed       optional integer; -1 (the default) means "use the seed
                   from the settings file" so batch launchers may omit it

    Loads a pickled dataset, trains an initial model on a small labeled seed
    set, then repeatedly selects candidate documents to label, retrains, and
    records pR2 plus per-iteration mean absolute errors.  Results go under
    <outputdir>/<group>/<label>; a marker file in <outputdir>/running
    advertises the run while it is in progress and is removed on exit.
    """
    parser = argparse.ArgumentParser(description='Job runner for ActiveTM '
                                     'experiments')
    parser.add_argument('settings',
                        help='the path to a file containing settings, as '
                             'described in README.md in the root ActiveTM '
                             'directory')
    parser.add_argument('outputdir', help='directory for output')
    parser.add_argument('label', help='identifying label')
    parser.add_argument('seed', default=-1, type=int, nargs='?')
    args = parser.parse_args()
    settings = utils.parse_settings(args.settings)
    # Results for this run are grouped under <outputdir>/<group>.
    trueoutputdir = os.path.join(args.outputdir, settings['group'])
    if not os.path.exists(trueoutputdir):
        try:
            os.makedirs(trueoutputdir)
        except OSError:
            # A concurrently launched job may have created it first.
            pass
    # Per-host, per-process marker filename so a monitor can see live jobs.
    filename = socket.gethostname() + '.' + str(os.getpid())
    runningfile = os.path.join(args.outputdir, 'running', filename)
    # Track whether the marker was actually created: if the open() below
    # fails (e.g. the 'running' directory is missing), the finally clause
    # must not raise FileNotFoundError and mask the original error.
    created_running = False
    try:
        with open(runningfile, 'w') as outputfh:
            outputfh.write('running')
        created_running = True
        start = time.time()
        input_pickle = os.path.join(args.outputdir,
                                    utils.get_pickle_name(args.settings))
        # NOTE(review): pickle.load on a locally prepared dataset file;
        # assumed trusted input.
        with open(input_pickle, 'rb') as ifh:
            dataset = pickle.load(ifh)
        if args.seed == -1:
            rng = random.Random(int(settings['seed']))
        else:
            rng = random.Random(args.seed)
        model = models.build(rng, settings)
        test_doc_ids, labeled_doc_ids, unlabeled_doc_ids = \
            partition_data_ids(dataset.num_docs, rng, settings)
        test_labels = []
        test_words = []
        for t in test_doc_ids:
            test_labels.append(dataset.labels[dataset.titles[t]])
            test_words.append(dataset.doc_tokens(t))
        # Mean of the held-out labels is the baseline passed to evaluate.pR2.
        test_labels_mean = np.mean(test_labels)
        known_labels = [dataset.labels[dataset.titles[t]]
                        for t in labeled_doc_ids]
        SELECT_METHOD = select.factory[settings['select']]
        END_LABELED = int(settings['endlabeled'])
        LABEL_INCREMENT = int(settings['increment'])
        CAND_SIZE = int(settings['candsize'])
        results = []
        end = time.time()
        init_time = datetime.timedelta(seconds=end - start)
        start = time.time()
        # sandt = select_and_train timing window
        sandt_start = time.time()
        model.train(dataset, labeled_doc_ids, known_labels)
        sandt_end = time.time()
        count = 0
        predictions = evaluate.get_predictions(model, test_words)
        pr2 = evaluate.pR2(predictions,
                           test_labels,
                           test_labels_mean)
        maes = evaluate.mean_absolute_errors(predictions, test_labels)
        np.savetxt(utils.get_mae_out_name(trueoutputdir, args.label, count),
                   maes)
        # Each result row: labeled-set size, cumulative elapsed seconds,
        # seconds spent on this select-and-train round, pR2.
        results.append([
            len(labeled_doc_ids),
            datetime.timedelta(seconds=time.time() - start).total_seconds(),
            datetime.timedelta(seconds=sandt_end - sandt_start).total_seconds(),
            pr2])
        # Active-learning loop: label until the budget is reached or the
        # unlabeled pool is exhausted.
        while len(labeled_doc_ids) < END_LABELED and len(unlabeled_doc_ids) > 0:
            count += 1
            sandt_start = time.time()
            # select.reservoir needs a sequence; unlabeled_doc_ids is a set
            candidates = select.reservoir(list(unlabeled_doc_ids), rng,
                                          CAND_SIZE)
            chosen = SELECT_METHOD(dataset, labeled_doc_ids, candidates, model,
                                   rng, LABEL_INCREMENT)
            for c in chosen:
                known_labels.append(dataset.labels[dataset.titles[c]])
                labeled_doc_ids.append(c)
                unlabeled_doc_ids.remove(c)
            model.train(dataset, labeled_doc_ids, known_labels, True)
            sandt_end = time.time()
            predictions = evaluate.get_predictions(model, test_words)
            pr2 = evaluate.pR2(predictions, test_labels, test_labels_mean)
            maes = evaluate.mean_absolute_errors(predictions, test_labels)
            np.savetxt(utils.get_mae_out_name(trueoutputdir, args.label,
                                              count),
                       maes)
            results.append([
                len(labeled_doc_ids),
                datetime.timedelta(
                    seconds=time.time() - start).total_seconds(),
                datetime.timedelta(
                    seconds=sandt_end - sandt_start).total_seconds(),
                pr2])
        model.cleanup()
        output = []
        output.append('# init time: {:s}'.format(str(init_time)))
        for result in results:
            output.append('\t'.join([str(r) for r in result]))
        output.append('')
        with open(os.path.join(trueoutputdir, args.label), 'w') as ofh:
            ofh.write('\n'.join(output))
    finally:
        # Remove the running marker only if it was actually created; see
        # created_running above.
        if created_running:
            os.remove(runningfile)
# Script entry point: run the experiment only when executed directly,
# not when imported as a module.
if __name__ == '__main__':
    _run()