Example #1
def test_qaranker_local_integration(self):
    relations = Relations.read(self.qa_path + "/relations.txt")
    assert len(relations) == 4
    text_set = TextSet.read_csv(self.qa_path + "/question_corpus.csv")
    assert text_set.get_uris() == ["Q1", "Q2"]
    # Preprocess the corpus: tokenize, normalize, map words to indices
    # and pad/truncate every sequence to length 5.
    transformed = text_set.tokenize().normalize().word2idx().shape_sequence(5)
    # Pairwise samples (one positive and one negative answer per question)
    # are used for training with the rank_hinge loss.
    relation_pairs = TextSet.from_relation_pairs(relations, transformed,
                                                 transformed)
    pair_samples = relation_pairs.get_samples()
    assert len(pair_samples) == 2
    for sample in pair_samples:
        assert list(sample.feature.shape) == [2, 10]
        assert np.allclose(sample.label.to_ndarray(),
                           np.array([[1.0], [0.0]]))
    # Listwise samples group all candidate answers of a question and are
    # used for ranking metrics such as NDCG and MAP.
    relation_lists = TextSet.from_relation_lists(relations, transformed,
                                                 transformed)
    relation_samples = relation_lists.get_samples()
    assert len(relation_samples) == 2
    for sample in relation_samples:
        assert list(sample.feature.shape) == [2, 10]
        assert list(sample.label.shape) == [2, 1]
    knrm = KNRM(5,
                5,
                self.glove_path,
                word_index=transformed.get_word_index())
    # Wrap KNRM in TimeDistributed so that both sequences of a pair are scored.
    model = Sequential().add(TimeDistributed(knrm, input_shape=(2, 10)))
    model.compile("sgd", "rank_hinge")
    model.fit(relation_pairs, batch_size=2, nb_epoch=2)
    print(knrm.evaluate_ndcg(relation_lists, 3))
    print(knrm.evaluate_map(relation_lists))
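
The test above is an excerpt and does not show its imports. A minimal sketch of what it assumes, using module paths from the Analytics Zoo Python API (verify them against your installed version):

# Module paths assumed from the Analytics Zoo Python API; adjust if your version differs.
import numpy as np
from zoo.feature.common import Relations
from zoo.feature.text import TextSet
from zoo.models.textmatching import KNRM
from zoo.pipeline.api.keras.models import Sequential
from zoo.pipeline.api.keras.layers import TimeDistributed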
Example #2
model.add(LSTM(
    10,
    return_sequences=False))
model.add(Dropout(0.2))

model.add(Dense(
    output_dim=1))

model.compile(loss='mse', optimizer='rmsprop')

%%time
# Train the model
print("Training begins.")
model.fit(
    x_train,
    y_train,
    batch_size=1024,
    nb_epoch=20)
print("Training completed.")

# Build the lists of differences and ratios between the predictions
# and the test data.
diff = []
ratio = []
predictions = model.predict(x_test)
# Predictions are distributed; collect them to the driver as a local list.
p = predictions.collect()
for u in range(len(y_test)):
    pr = p[u][0]
    ratio.append((y_test[u] / pr) - 1)
    diff.append(abs(y_test[u] - pr))
    
# plot the predicted values and actual values (for the test data)
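
The plotting step announced by the last comment is not part of the excerpt. A minimal sketch of what it could look like, assuming matplotlib and the p / y_test values built above (labels are placeholders, not from the original notebook):

import matplotlib.pyplot as plt

# Hypothetical plot: compare collected predictions with the actual test targets.
plt.plot([pred[0] for pred in p], label="predicted")
plt.plot(y_test, label="actual")
plt.legend()
plt.show()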

Example #3
    train_relations = Relations.read(options.data_path + "/relation_train.csv",
                                     sc, int(options.partition_num))
    # Training relations become pairwise samples; validation relations are
    # grouped into lists so that ranking metrics can be computed per question.
    train_set = TextSet.from_relation_pairs(train_relations, q_set, a_set)
    validate_relations = Relations.read(options.data_path + "/relation_valid.csv",
                                        sc, int(options.partition_num))
    validate_set = TextSet.from_relation_lists(validate_relations, q_set, a_set)

    # Load an existing KNRM model if one is given; otherwise build a new one
    # from the shared word index and the pre-trained word embedding file.
    if options.model:
        knrm = KNRM.load_model(options.model)
    else:
        word_index = a_set.get_word_index()
        knrm = KNRM(int(options.question_length), int(options.answer_length),
                    options.embedding_file, word_index)
    # Wrap KNRM in TimeDistributed so that the two concatenated
    # (question, answer) sequences of each pairwise sample are both scored.
    model = Sequential().add(
        TimeDistributed(
            knrm,
            input_shape=(2, int(options.question_length) + int(options.answer_length))))
    model.compile(optimizer=SGD(learningrate=float(options.learning_rate)),
                  loss="rank_hinge")
    # Train one epoch at a time so that NDCG@3, NDCG@5 and MAP can be
    # evaluated on the validation set after every epoch.
    for i in range(int(options.nb_epoch)):
        model.fit(train_set, batch_size=int(options.batch_size), nb_epoch=1)
        knrm.evaluate_ndcg(validate_set, 3)
        knrm.evaluate_ndcg(validate_set, 5)
        knrm.evaluate_map(validate_set)

    if options.output_path:
        knrm.save_model(options.output_path + "/knrm.model")
        a_set.save_word_index(options.output_path + "/word_index.txt")
        print("Trained model and word dictionary saved")
    sc.stop()
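
Example #3 references q_set, a_set and sc without showing how they are created. A rough sketch, assuming a SparkContext from init_nncontext and question/answer CSV corpora in the same data directory (the app name, file names and preprocessing options are assumptions, not taken from the original file):

from zoo.common.nncontext import init_nncontext
from zoo.feature.text import TextSet

sc = init_nncontext("QARanker Example")
# Hypothetical corpus preprocessing mirroring Example #1; file names are assumed.
q_set = TextSet.read_csv(options.data_path + "/question_corpus.csv",
                         sc, int(options.partition_num)) \
    .tokenize().normalize().word2idx() \
    .shape_sequence(int(options.question_length))
a_set = TextSet.read_csv(options.data_path + "/answer_corpus.csv",
                         sc, int(options.partition_num)) \
    .tokenize().normalize().word2idx() \
    .shape_sequence(int(options.answer_length))
# NOTE: for KNRM the two corpora should share one word index; check the
# word2idx options of your Analytics Zoo version for how to reuse q_set's map.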