Example #1
def babi_handler(data_dir, task_number):
    """
    Build the handler for a bAbI task.

    Args:
        data_dir (string): Path to the bAbI data directory.
        task_number (int): The task ID from the bAbI dataset (1-20).

    Returns:
        BABI: Handler for the selected bAbI task.
    """
    # task_list and subset are module-level names defined elsewhere in the script
    task = task_list[task_number - 1]
    return BABI(path=data_dir, task=task, subset=subset)
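The snippet relies on two module-level names, task_list (the ordered list of bAbI task names) and subset (the dataset variant). A minimal usage sketch with those names stubbed in; the task-name strings follow the bAbI file-naming convention and are illustrative only:

# Hypothetical module-level setup assumed by babi_handler:
task_list = ['qa1_single-supporting-fact', 'qa2_two-supporting-facts']  # ... through task 20
subset = 'en'  # bAbI variant; e.g. 'en-10k' for the 10,000-example version

babi = babi_handler('/path/to/babi', task_number=1)
print(babi.story_maxlen, babi.query_maxlen, babi.vocab_size)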
Example #2
File: demo.py Project: mydaisy2/neon
# (snippet begins mid-script: ex_answer, printed here, is the expected
#  answer for a sample test story shown just above in the full demo)
print ex_answer

while True:
    # ask user for story and question
    story_lines = []
    line = raw_input("\nPlease enter a story:\n")
    while line != "":
        story_lines.append(line)
        line = raw_input()
    story = ("\n".join(story_lines)).strip()

    question = raw_input("Please enter a question:\n")

    # convert user input into a suitable network input
    def vectorize(words, max_len):
        return be.array(Text.pad_sentences([babi.words_to_vector(BABI.tokenize(words))], max_len))
    s = vectorize(story, babi.story_maxlen)
    q = vectorize(question, babi.query_maxlen)

    # get prediction probabilities with forward propagation
    probs = model_inference.fprop(x=(s, q), inference=True).get()

    # get top k answers
    top_k = -min(5, babi.vocab_size)  # negative kth: argpartition puts the k largest at the tail
    max_indices = np.argpartition(probs, top_k, axis=0)[top_k:]  # top-k indices, unordered
    max_probs = probs[max_indices]
    sorted_idx = max_indices[np.argsort(max_probs, axis=0)]  # ascending by probability

    print "\nAnswer:"
    for idx in reversed(sorted_idx):
        idx = int(idx)
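The top-k step is the one non-obvious piece: np.argpartition with a negative kth moves the k largest entries to the tail of the index array in O(n), and only those k are then fully sorted, which is cheaper than sorting over the whole vocabulary. A self-contained sketch of the same idiom in plain NumPy:

import numpy as np

probs = np.array([0.05, 0.30, 0.10, 0.25, 0.02, 0.28])
k = min(5, probs.size)

top = np.argpartition(probs, -k)[-k:]   # indices of the k largest, in no particular order
top = top[np.argsort(probs[top])]       # order them ascending by probability
for idx in reversed(top):               # highest probability first
    print(int(idx), float(probs[idx]))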
Example #3
# (the snippet opens mid-way through argument parsing; the start of the
#  --task add_argument call below is reconstructed and may differ from the original)
parser.add_argument('--task',
                    type=int,
                    default=1,
                    choices=xrange(1, 21),
                    help='the task ID to train/test on from bAbI dataset (1-20)')
parser.add_argument('--rlayer_type',
                    default='gru',
                    choices=['gru', 'lstm'],
                    help='type of recurrent layer to use (gru or lstm)')
args = parser.parse_args(gen_be=False)
args.batch_size = 32

task = task_list[args.task - 1]

# setup backend
be = gen_backend(**extract_valid_args(args, gen_backend))

# load the bAbI dataset (subset is defined earlier in the full script, e.g. 'en')
babi = BABI(path=args.data_dir, task=task, subset=subset)
train_set = QA(*babi.train)
valid_set = QA(*babi.test)

# recurrent layer parameters (default gru)
rlayer_obj = GRU if args.rlayer_type == 'gru' else LSTM
rlayer_params = dict(output_size=100,
                     reset_cells=True,
                     init=GlorotUniform(),
                     init_inner=Orthonormal(0.5),
                     activation=Tanh(),
                     gate_activation=Logistic())

# if using lstm, swap the activation functions
if args.rlayer_type == 'lstm':
    rlayer_params.update(dict(activation=Logistic(), gate_activation=Tanh()))
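The snippet ends before the parameter dict is consumed. In the full script these settings would be unpacked into the selected recurrent layer class, roughly as below; the surrounding model assembly (embedding layers, the QA architecture, and the training loop) is omitted:

# Hypothetical continuation: build the chosen recurrent layer from the shared dict
rlayer = rlayer_obj(**rlayer_params)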
Example #4
File: demo.py Project: NervanaSystems/neon
def vectorize(words, max_len):
    # tokenize, map each word to its vocabulary index, zero-pad to a fixed
    # length, and move the result onto the compute backend
    return be.array(pad_sentences([babi.words_to_vector(BABI.tokenize(words))], max_len))
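Padding to a fixed max_len matters because the trained network expects inputs shaped like the padded training data (babi.story_maxlen and babi.query_maxlen in the demo above). A toy illustration of the index-and-pad step in plain NumPy; the vocabulary and the padding side are placeholders, not neon's exact behavior:

import numpy as np

word_to_index = {'mary': 1, 'went': 2, 'to': 3, 'the': 4, 'kitchen': 5}  # toy vocabulary
tokens = ['mary', 'went', 'to', 'the', 'kitchen']
ids = [word_to_index[t] for t in tokens]   # words -> integer indices

max_len = 8
padded = np.zeros(max_len, dtype=int)      # index 0 is reserved for padding
padded[:len(ids)] = ids                    # pad out to the fixed length
print(padded)                              # [1 2 3 4 5 0 0 0]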