def vectorize_stories(self, data):
    """
    Convert (story, query, answer) word data into vectors.

    Args:
        data (tuple) : Tuple of story, query, answer word data.

    Returns:
        tuple : Tuple of story, query, answer vectors.
    """
    # Vectorize every example in one pass, then regroup by field with zip(*...).
    triples = [(self.words_to_vector(story),
                self.words_to_vector(query),
                self.one_hot_vector(answer))
               for story, query, answer in data]
    if triples:
        s_vecs, q_vecs, a_vecs = (list(group) for group in zip(*triples))
    else:
        # No examples: keep the same empty-list behavior as an empty input.
        s_vecs, q_vecs, a_vecs = [], [], []
    # Pad variable-length story/query vectors out to fixed maximum lengths
    # so they can be batched; answers are already fixed-size one-hot vectors.
    s_vecs = Text.pad_sentences(s_vecs, self.story_maxlen)
    q_vecs = Text.pad_sentences(q_vecs, self.query_maxlen)
    return (s_vecs, q_vecs, np.array(a_vecs))
def vectorize_stories(self, data):
    """
    Convert (story, query, answer) word data into vectors.

    Args:
        data (tuple) : Tuple of story, query, answer word data.

    Returns:
        tuple : Tuple of story, query, answer vectors.
    """
    # Accumulate per-example index vectors for stories, queries, and answers.
    s, q, a = [], [], []
    for story, query, answer in data:
        s.append(self.words_to_vector(story))
        q.append(self.words_to_vector(query))
        a.append(self.one_hot_vector(answer))
    # Pad variable-length story/query vectors to fixed maximum lengths so
    # they can be batched; answers are already fixed-size one-hot vectors.
    s = Text.pad_sentences(s, self.story_maxlen)
    q = Text.pad_sentences(q, self.query_maxlen)
    a = np.array(a)
    return (s, q, a)
# Show the ground-truth answer for the example story/question printed above.
print ex_answer
# Interactive demo: repeatedly read a story and a question from stdin,
# run the trained model forward, and rank the most probable answers.
while True:
    # ask user for story and question
    story_lines = []
    line = raw_input("\nPlease enter a story:\n")
    # A blank line terminates story entry.
    while line != "":
        story_lines.append(line)
        line = raw_input()
    story = ("\n".join(story_lines)).strip()
    question = raw_input("Please enter a question:\n")
    # convert user input into a suitable network input
    # (tokenize -> word-index vector -> pad to the model's fixed max length
    #  -> backend tensor)
    vectorize = lambda words, max_len: \
        be.array(Text.pad_sentences([babi.words_to_vector(BABI.tokenize(words))], max_len))
    s = vectorize(story, babi.story_maxlen)
    q = vectorize(question, babi.query_maxlen)
    # get prediction probabilities with forward propagation
    probs = model_inference.fprop(x=(s, q), inference=True).get()
    # get top k answers: argpartition moves the k largest entries into the
    # last k slots (unsorted); argsort then orders those k by probability.
    top_k = -min(5, babi.vocab_size)
    max_indices = np.argpartition(probs, top_k, axis=0)[top_k:]
    max_probs = probs[max_indices]
    sorted_idx = max_indices[np.argsort(max_probs, axis=0)]
    print "\nAnswer:"
    # NOTE(review): loop body appears truncated here — presumably prints each
    # candidate answer word with its probability; confirm against the full file.
    for idx in reversed(sorted_idx):
        idx = int(idx)
# Show the ground-truth answer for the example story/question printed above.
print ex_answer
# Interactive demo: repeatedly read a story and a question from stdin,
# run the trained model forward, and rank the most probable answers.
while True:
    # ask user for story and question
    story_lines = []
    line = raw_input("\nPlease enter a story:\n")
    # A blank line terminates story entry.
    while line != "":
        story_lines.append(line)
        line = raw_input()
    story = ("\n".join(story_lines)).strip()
    question = raw_input("Please enter a question:\n")
    # convert user input into a suitable network input
    # (tokenize -> word-index vector -> pad to the model's fixed max length
    #  -> backend tensor)
    vectorize = lambda words, max_len: \
        be.array(Text.pad_sentences([babi.words_to_vector(BABI.tokenize(words))], max_len))
    s = vectorize(story, babi.story_maxlen)
    q = vectorize(question, babi.query_maxlen)
    # get prediction probabilities with forward propagation
    probs = model_inference.fprop(x=(s, q), inference=True).get()
    # get top k answers: argpartition moves the k largest entries into the
    # last k slots (unsorted); argsort then orders those k by probability.
    top_k = -min(5, babi.vocab_size)
    max_indices = np.argpartition(probs, top_k, axis=0)[top_k:]
    max_probs = probs[max_indices]
    sorted_idx = max_indices[np.argsort(max_probs, axis=0)]
    print "\nAnswer:"
    # NOTE(review): loop body appears truncated here — presumably prints each
    # candidate answer word with its probability; confirm against the full file.
    for idx in reversed(sorted_idx):
        idx = int(idx)