Example #1
0
    def vectorize_stories(self, data):
        """
        Convert (story, query, answer) word triples into vector form.

        Args:
            data (tuple): Iterable of (story, query, answer) word sequences.

        Returns:
            tuple: (story, query, answer) vectors — stories and queries
            padded to their respective max lengths, answers as a numpy array
            of one-hot rows.
        """
        # Accumulate the three columns in a single pass over the data.
        columns = ([], [], [])
        for story, query, answer in data:
            columns[0].append(self.words_to_vector(story))
            columns[1].append(self.words_to_vector(query))
            columns[2].append(self.one_hot_vector(answer))

        # Pad variable-length story/query index lists to fixed widths.
        padded_stories = Text.pad_sentences(columns[0], self.story_maxlen)
        padded_queries = Text.pad_sentences(columns[1], self.query_maxlen)
        answer_matrix = np.array(columns[2])
        return (padded_stories, padded_queries, answer_matrix)
Example #2
0
    def vectorize_stories(self, data):
        """
        Vectorize (story, query, answer) word data.

        Args:
            data (tuple): Iterable of (story, query, answer) word sequences.

        Returns:
            tuple: Story, query, and answer vectors; stories and queries are
            padded to ``story_maxlen`` / ``query_maxlen``, answers become a
            numpy array of one-hot vectors.
        """
        story_vecs = []
        query_vecs = []
        answer_vecs = []
        # One pass: map words to index vectors, answers to one-hot vectors.
        for story, query, answer in data:
            story_vecs.append(self.words_to_vector(story))
            query_vecs.append(self.words_to_vector(query))
            answer_vecs.append(self.one_hot_vector(answer))

        return (Text.pad_sentences(story_vecs, self.story_maxlen),
                Text.pad_sentences(query_vecs, self.query_maxlen),
                np.array(answer_vecs))
Example #3
0
File: demo.py  Project: mydaisy2/neon
# NOTE(review): Python 2 syntax (print statement, raw_input) — this demo
# will not run under Python 3 without conversion.
print ex_answer

# Interactive loop: read a story and a question from the user, run a
# forward pass of the trained model, and report the top predicted answers.
while True:
    # ask user for story and question
    story_lines = []
    line = raw_input("\nPlease enter a story:\n")
    # A blank line terminates story entry.
    while line != "":
        story_lines.append(line)
        line = raw_input()
    story = ("\n".join(story_lines)).strip()

    question = raw_input("Please enter a question:\n")

    # convert user input into a suitable network input
    # Tokenize, map words to index vectors, pad to the fixed maximum length,
    # and upload to the compute backend `be` as a tensor.
    vectorize = lambda words, max_len: \
        be.array(Text.pad_sentences([babi.words_to_vector(BABI.tokenize(words))], max_len))
    s = vectorize(story, babi.story_maxlen)
    q = vectorize(question, babi.query_maxlen)

    # get prediction probabilities with forward propagation
    probs = model_inference.fprop(x=(s, q), inference=True).get()

    # get top k answers
    # argpartition with a negative kth moves the top |top_k| probabilities
    # into the last |top_k| slots; the subsequent argsort orders that subset.
    top_k = -min(5, babi.vocab_size)
    max_indices = np.argpartition(probs, top_k, axis=0)[top_k:]
    max_probs = probs[max_indices]
    sorted_idx = max_indices[np.argsort(max_probs, axis=0)]

    print "\nAnswer:"
    # Iterate highest-probability answers first (loop body continues beyond
    # this excerpt).
    for idx in reversed(sorted_idx):
        idx = int(idx)
Example #4
0
File: demo.py  Project: bin2000/neon
# NOTE(review): Python 2 syntax (print statement, raw_input) — this demo
# will not run under Python 3 without conversion.
print ex_answer

# Interactive loop: collect a story and question from stdin, forward them
# through the trained model, and display the most probable answers.
while True:
    # ask user for story and question
    story_lines = []
    line = raw_input("\nPlease enter a story:\n")
    # Story input ends when the user enters an empty line.
    while line != "":
        story_lines.append(line)
        line = raw_input()
    story = ("\n".join(story_lines)).strip()

    question = raw_input("Please enter a question:\n")

    # convert user input into a suitable network input
    # Tokenize the text, convert words to index vectors, pad to max_len,
    # and wrap as a backend (`be`) tensor for inference.
    vectorize = lambda words, max_len: \
        be.array(Text.pad_sentences([babi.words_to_vector(BABI.tokenize(words))], max_len))
    s = vectorize(story, babi.story_maxlen)
    q = vectorize(question, babi.query_maxlen)

    # get prediction probabilities with forward propagation
    probs = model_inference.fprop(x=(s, q), inference=True).get()

    # get top k answers
    # np.argpartition with negative kth gathers the top |top_k| entries at
    # the end; argsort then orders those candidates by probability.
    top_k = -min(5, babi.vocab_size)
    max_indices = np.argpartition(probs, top_k, axis=0)[top_k:]
    max_probs = probs[max_indices]
    sorted_idx = max_indices[np.argsort(max_probs, axis=0)]

    print "\nAnswer:"
    # Print in descending probability order (body continues past this
    # excerpt).
    for idx in reversed(sorted_idx):
        idx = int(idx)