Example #1
 def test_qaranker_local_integration(self):
     relations = Relations.read(self.qa_path + "/relations.txt")
     assert len(relations) == 4
     text_set = TextSet.read_csv(self.qa_path + "/question_corpus.csv")
     assert text_set.get_uris() == ["Q1", "Q2"]
     transformed = text_set.tokenize().normalize().word2idx().shape_sequence(5)
     relation_pairs = TextSet.from_relation_pairs(relations, transformed,
                                                  transformed)
     pair_samples = relation_pairs.get_samples()
     assert len(pair_samples) == 2
     for sample in pair_samples:
         assert list(sample.feature.shape) == [2, 10]
         assert np.allclose(sample.label.to_ndarray(),
                            np.array([[1.0], [0.0]]))
     relation_lists = TextSet.from_relation_lists(relations, transformed,
                                                  transformed)
     relation_samples = relation_lists.get_samples()
     assert len(relation_samples) == 2
     for sample in relation_samples:
         assert list(sample.feature.shape) == [2, 10]
         assert list(sample.label.shape) == [2, 1]
     knrm = KNRM(5,
                 5,
                 self.glove_path,
                 word_index=transformed.get_word_index())
     model = Sequential().add(TimeDistributed(knrm, input_shape=(2, 10)))
     model.compile("sgd", "rank_hinge")
     model.fit(relation_pairs, batch_size=2, nb_epoch=2)
     print(knrm.evaluate_ndcg(relation_lists, 3))
     print(knrm.evaluate_map(relation_lists))
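
The test above assumes the Analytics Zoo Keras-style API is already in scope. A plausible import block, assuming the usual Analytics Zoo package layout (the original snippet does not show its imports):

# Assumed imports (Analytics Zoo package layout; not part of the original snippet)
import numpy as np
from zoo.feature.common import Relations
from zoo.feature.text import TextSet
from zoo.models.textmatching import KNRM
from zoo.pipeline.api.keras.layers import TimeDistributed
from zoo.pipeline.api.keras.models import Sequential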
Example #2
 def build_model(self):
     model = Sequential()
     model.add(Dense(24, input_dim=self.state_size, activation='relu'))
     model.add(Dense(24, activation='relu'))
     model.add(Dense(self.action_size, activation='linear'))
     model.summary()
     model.compile(loss='mse', optimizer=Adam(lr=self.learning_rate))
     return model
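
This is the standard value-network builder from a DQN agent: two hidden ReLU layers and a linear output head sized to the action space, trained with mean squared error on Q-value targets. A minimal sketch of the imports it relies on, assuming the Keras 1.x/early-2.x API that the `lr=` argument implies:

# Assumed imports (standard Keras API of that era; not shown in the original)
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import Adam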
Example #3
 def test_regularizer(self):
     model = ZSequential()
     model.add(
         ZLayer.Dense(16,
                      W_regularizer=regularizers.l2(0.001),
                      activation='relu',
                      input_shape=(10000, )))
     model.summary()
     model.compile(optimizer='rmsprop',
                   loss='binary_crossentropy',
                   metrics=['acc'])
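
ZSequential and ZLayer here are aliases for the Analytics Zoo Keras-style API, which keeps the Keras-1-style W_regularizer argument. For comparison, the equivalent layer in stock Keras 2, where kernel_regularizer replaces W_regularizer (a sketch, not part of the original test):

from keras import regularizers
from keras.layers import Dense

# Same 16-unit ReLU layer with an L2 weight penalty, Keras 2 spelling
layer = Dense(16,
              kernel_regularizer=regularizers.l2(0.001),
              activation='relu',
              input_shape=(10000,))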
Example #4
def buildmodel():
    print("Now we build the model")
    model = Sequential()
    model.add(
        Convolution2D(32,
                      8,
                      8,
                      subsample=(4, 4),
                      border_mode='same',
                      input_shape=(img_rows, img_cols,
                                   img_channels)))  # 80*80*4
    model.add(Activation('relu'))
    model.add(Convolution2D(64, 4, 4, subsample=(2, 2), border_mode='same'))
    model.add(Activation('relu'))
    model.add(Convolution2D(64, 3, 3, subsample=(1, 1), border_mode='same'))
    model.add(Activation('relu'))
    model.add(Flatten())
    model.add(Dense(512))
    model.add(Activation('relu'))
    model.add(Dense(2))

    model.compile(loss='mse', optimizer='adam')
    print("We finish building the model")
    return model
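
Convolution2D(32, 8, 8, subsample=(4, 4), border_mode='same') is Keras 1 syntax. Under Keras 2 the same stack would use Conv2D with strides and padding, e.g. for the first layer (a sketch of the equivalent call only):

from keras.layers import Conv2D

# Keras 2 equivalent of the first Convolution2D layer above
Conv2D(32, (8, 8), strides=(4, 4), padding='same',
       input_shape=(img_rows, img_cols, img_channels))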
Example #5
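This excerpt starts mid-script; it presumes a freshly created model and the Keras 1 LSTM API (output_dim, nb_epoch). A plausible preamble, with module names assumed:

# Assumed preamble (not part of the original excerpt)
from keras.models import Sequential
from keras.layers import LSTM, Dense, Dropout

model = Sequential()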
model.add(LSTM(
    input_shape=(x_train.shape[1], x_train.shape[-1]),
    output_dim=20,
    return_sequences=True))
model.add(Dropout(0.2))

model.add(LSTM(
    10,
    return_sequences=False))
model.add(Dropout(0.2))

model.add(Dense(
    output_dim=1))

model.compile(loss='mse', optimizer='rmsprop')

# %%time  # Jupyter cell magic; only valid at the top of a notebook cell
# Train the model
print("Training begins.")
model.fit(
    x_train,
    y_train,
    batch_size=1024,
    nb_epoch=20)
print("Training completed.")

# Build lists of the differences between the predictions and the test data
diff = []
ratio = []
predictions = model.predict(x_test)
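
The excerpt stops right after predict, leaving diff and ratio unfilled. One plausible completion, assuming y_test holds the ground-truth targets (a sketch, not the original code):

for u in range(len(y_test)):
    pr = predictions[u][0]              # predicted value for sample u
    ratio.append((y_test[u] / pr) - 1)  # relative error
    diff.append(abs(y_test[u] - pr))    # absolute error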
Example #6
    train_relations = Relations.read(options.data_path + "/relation_train.csv",
                                     sc, int(options.partition_num))
    train_set = TextSet.from_relation_pairs(train_relations, q_set, a_set)
    validate_relations = Relations.read(options.data_path + "/relation_valid.csv",
                                        sc, int(options.partition_num))
    validate_set = TextSet.from_relation_lists(validate_relations, q_set, a_set)

    if options.model:
        knrm = KNRM.load_model(options.model)
    else:
        word_index = a_set.get_word_index()
        knrm = KNRM(int(options.question_length), int(options.answer_length),
                    options.embedding_file, word_index)
    model = Sequential().add(
        TimeDistributed(
            knrm,
            input_shape=(2, int(options.question_length) + int(options.answer_length))))
    model.compile(optimizer=SGD(learningrate=float(options.learning_rate)),
                  loss="rank_hinge")
    for i in range(0, int(options.nb_epoch)):
        model.fit(train_set, batch_size=int(options.batch_size), nb_epoch=1)
        knrm.evaluate_ndcg(validate_set, 3)
        knrm.evaluate_ndcg(validate_set, 5)
        knrm.evaluate_map(validate_set)

    if options.output_path:
        knrm.save_model(options.output_path + "/knrm.model")
        a_set.save_word_index(options.output_path + "/word_index.txt")
        print("Trained model and word dictionary saved")
    sc.stop()
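
q_set and a_set are built earlier in this script. Following the same preprocessing chain as Example #1, their construction would look roughly like this (file names, read_csv arguments, and word-index handling are assumptions):

# Assumed construction of the question/answer TextSets (a sketch)
q_set = TextSet.read_csv(options.data_path + "/question_corpus.csv",
                         sc, int(options.partition_num)) \
    .tokenize().normalize().word2idx() \
    .shape_sequence(int(options.question_length))
a_set = TextSet.read_csv(options.data_path + "/answer_corpus.csv",
                         sc, int(options.partition_num)) \
    .tokenize().normalize().word2idx() \
    .shape_sequence(int(options.answer_length))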
Example #7
print("Created Train and Test Df\n")

predictionColumn = 'slotOccupancy'

x = trainDf.drop(columns=[predictionColumn])
inputs = len(x.columns)

y = trainDf[[predictionColumn]]
outputs = len(y.columns)

model = Sequential()
model.add(Dense(output_dim=inputs, activation="relu", input_shape=(inputs, )))
model.add(Dense(output_dim=inputs, activation="relu"))
model.add(Dense(output_dim=outputs))

model.compile(optimizer="adam", loss="mean_squared_error")

model.summary()
print("Created Sequential Model!\n")

xNumpy = x.to_numpy()
yNumpy = y.to_numpy()
# model.fit(x=xNumpy, y=yNumpy, nb_epoch=1, distributed=False)

import numpy as np  # np is used below but is not imported anywhere in the excerpt
import tensorflow as tf

weights = np.array(model.get_weights(), dtype=object)
print(weights)

tfModel = tf.keras.models.Sequential()
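
The excerpt ends just after creating the empty tf.keras model. A plausible continuation mirrors the three Dense layers and transplants the trained weights; weight array ordering and orientation can differ between frameworks, so this is a sketch under that caveat:

# Mirror the layer stack in tf.keras (sketch; layer sizes taken from above)
tfModel.add(tf.keras.layers.Dense(inputs, activation="relu", input_shape=(inputs,)))
tfModel.add(tf.keras.layers.Dense(inputs, activation="relu"))
tfModel.add(tf.keras.layers.Dense(outputs))
# Transplanting the weights may require per-layer reshaping or transposing,
# depending on how the source framework lays them out:
# tfModel.set_weights([np.asarray(w) for w in weights])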