import pickle
import numpy as np

# LSTMCNN, LSTMCNN_print and Vocabulary are defined elsewhere in this
# repository; the module paths below are assumed.
from model import LSTMCNN, LSTMCNN_print
from vocab import Vocabulary


class evaluator:
    def __init__(self, name, vocabulary, init, extract, layer):
        # Restore the options saved with the checkpoint and switch to
        # single-sample, single-step evaluation.
        self.opt = pickle.load(open('{}.pkl'.format(name), 'rb'))
        self.opt.batch_size = 1
        self.opt.seq_length = 1
        self.reader = Vocabulary(self.opt.tokens, vocabulary,
                                 max_word_l=self.opt.max_word_l)
        if extract:
            # Variant of the model that exposes the activations of `layer`.
            self.model = LSTMCNN_print(self.opt, extract, layer)
            self.model.summary()
            self.model.load_weights('{}.h5'.format(name), by_name=True)
        else:
            self.model = LSTMCNN(self.opt)
            self.model.load_weights('{}.h5'.format(name))
            self.model.summary()
        # Optional mean recurrent state used to initialise the LSTM.
        if init:
            self.state_mean = np.load(init)
        else:
            self.state_mean = None

    def logprob(self, line):
        # Return the model loss on `line` together with the number of
        # words that were scored.
        x, y = self.reader.get_input(line)
        nwords = len(y)
        if self.state_mean is not None:
            self.model.set_states_value(self.state_mean)
        return self.model.evaluate(x, y, batch_size=1, verbose=0), nwords

    def get_embedding(self, line):
        # Return the activations of the extracted layer for `line`.
        x, y = self.reader.get_input(line)
        if self.state_mean is not None:
            self.model.set_states_value(self.state_mean)
        return self.model.predict(x, batch_size=1, verbose=0)
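# Usage sketch for the extraction variant above. The checkpoint name
# 'model', the vocabulary file 'vocab.npz' and the 'lstm' / 2 values for
# `extract` and `layer` are placeholders; what those arguments accept is
# determined by LSTMCNN_print, which is not shown here.
def extract_states(sentence):
    ev = evaluator('model', 'vocab.npz', init=None, extract='lstm', layer=2)
    # One activation vector per position of `sentence`.
    return ev.get_embedding(sentence)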
# Simpler variant of the evaluator without the activation-extraction path.
class evaluator:
    def __init__(self, name, vocabulary, init):
        # Restore the options saved with the checkpoint and switch to
        # single-sample, single-step evaluation.
        self.opt = pickle.load(open('{}.pkl'.format(name), 'rb'))
        self.opt.batch_size = 1
        self.opt.seq_length = 1
        self.reader = Vocabulary(self.opt.tokens, vocabulary,
                                 max_word_l=self.opt.max_word_l)
        self.model = LSTMCNN(self.opt)
        self.model.load_weights('{}.h5'.format(name))
        # Optional mean recurrent state used to initialise the LSTM.
        if init:
            self.state_mean = np.load(init)
        else:
            self.state_mean = None

    def logprob(self, line):
        # Return the model loss on `line` together with the number of
        # words that were scored.
        x, y = self.reader.get_input(line)
        nwords = len(y)
        if self.state_mean is not None:
            self.model.set_states_value(self.state_mean)
        return self.model.evaluate(x, y, batch_size=1, verbose=0), nwords
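# Usage sketch: per-word perplexity over a plain-text test file, one
# sentence per line. The names 'model', 'vocab.npz' and 'test.txt' are
# placeholders, and evaluate() is assumed to return the mean per-word
# cross-entropy (in nats) for the line, as Keras' Model.evaluate does for
# a cross-entropy loss.
if __name__ == '__main__':
    ev = evaluator('model', 'vocab.npz', init=None)
    total_loss, total_words = 0.0, 0
    with open('test.txt') as f:
        for sentence in f:
            loss, nwords = ev.logprob(sentence.strip())
            total_loss += loss * nwords   # undo the per-line averaging
            total_words += nwords
    print('perplexity: {:.2f}'.format(np.exp(total_loss / total_words)))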