Example #1
list_tokenized_validation = tokenizer.texts_to_sequences(X_validation)
input_validation = sequence.pad_sequences(list_tokenized_validation, maxlen=maxlen)

list_tokenized_total = tokenizer.texts_to_sequences(X_total)
input_total = sequence.pad_sequences(list_tokenized_total, maxlen=maxlen)
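
# Note (illustrative sketch, not from the original script): texts_to_sequences()
# maps each text to a list of word indices from the fitted Tokenizer, and
# pad_sequences() zero-pads (on the left by default) or truncates to maxlen.
# The toy tokenizer below is an assumption for illustration only:
from keras.preprocessing.text import Tokenizer
toy_tok = Tokenizer()
toy_tok.fit_on_texts(["the cat sat", "the dog"])
toy_seqs = toy_tok.texts_to_sequences(["the cat"])        # [[1, 2]] here ('the' is most frequent)
toy_padded = sequence.pad_sequences(toy_seqs, maxlen=5)   # [[0, 0, 0, 1, 2]]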

print("model1")
model1 = TextClassifier().model(embeddings_matrix, maxlen, word_index, 4)
file_path = model_dir + "model_ltc.hdf5"
retrain_path = model_dir_retrain + "model_ltc_{epoch:02d}.hdf5"
model1.load_weights(file_path)
checkpoint = ModelCheckpoint(retrain_path, verbose=2, save_weights_only=True)
metrics = Metrics()
callbacks_list = [checkpoint, metrics]
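# Note (illustrative): ModelCheckpoint formats its filepath with the epoch number
# at each save, so the retrain_path pattern above yields one weights file per epoch.
# The same string formatting, shown directly:
assert "model_ltc_{epoch:02d}.hdf5".format(epoch=3) == "model_ltc_03.hdf5"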
history = model1.fit(input_total, Y_total_ltc, batch_size=batch_size, epochs=epochs,
                     validation_data=(input_validation, Y_validation_ltc), callbacks=callbacks_list, verbose=2)
del model1
del history
gc.collect()
K.clear_session()
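# Note (editorial comment): the del / gc.collect() / K.clear_session() sequence
# frees the previous model's TensorFlow graph and GPU memory before the next
# model is built; without clear_session(), each successive model would keep
# growing the default graph within this single process.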

print("model2")
model2 = TextClassifier().model(embeddings_matrix, maxlen, word_index, 4)
file_path = model_dir + "model_ldfbd.hdf5"
retrain_path = model_dir_retrain + "model_ldfbd_{epoch:02d}.hdf5"
model2.load_weights(file_path)
checkpoint = ModelCheckpoint(retrain_path, verbose=2, save_weights_only=True)
metrics = Metrics()
callbacks_list = [checkpoint, metrics]
history = model2.fit(input_total, Y_total_ldfbd, batch_size=batch_size, epochs=epochs,
                     validation_data=(input_validation, Y_validation_ldfbd), callbacks=callbacks_list, verbose=2)
Example #2
input_train = sequence.pad_sequences(list_tokenized_train, maxlen=maxlen)

list_tokenized_validation = tokenizer.texts_to_sequences(X_validation)
input_validation = sequence.pad_sequences(list_tokenized_validation,
                                          maxlen=maxlen)

print("model17")
model17 = TextClassifier().model(embeddings_matrix, maxlen, word_index, 4)
file_path = model_dir + "model_dl_{epoch:02d}.hdf5"
checkpoint = ModelCheckpoint(file_path, verbose=2, save_weights_only=True)
metrics = Metrics()
callbacks_list = [checkpoint, metrics]
history = model17.fit(input_train,
                      Y_train_dl,
                      batch_size=batch_size,
                      epochs=epochs,
                      validation_data=(input_validation, Y_validation_dl),
                      callbacks=callbacks_list,
                      verbose=2)
del model17
del history
gc.collect()
K.clear_session()

print("model18")
model18 = TextClassifier().model(embeddings_matrix, maxlen, word_index, 4)
file_path = model_dir + "model_dr_{epoch:02d}.hdf5"
checkpoint = ModelCheckpoint(file_path, verbose=2, save_weights_only=True)
metrics = Metrics()
callbacks_list = [checkpoint, metrics]
history = model18.fit(input_train,
                      Y_train_dr,  # assumed: call completed to match the sibling examples (truncated in the source)
                      batch_size=batch_size,
                      epochs=epochs,
                      validation_data=(input_validation, Y_validation_dr),
                      callbacks=callbacks_list,
                      verbose=2)
Example #3
input_train = sequence.pad_sequences(list_tokenized_train, maxlen=maxlen)

list_tokenized_validation = tokenizer.texts_to_sequences(X_validation)
input_validation = sequence.pad_sequences(list_tokenized_validation,
                                          maxlen=maxlen)

print("model7")
model7 = TextClassifier().model(embeddings_matrix, maxlen, word_index, 4)
file_path = model_dir + "model_ssp_{epoch:02d}.hdf5"
checkpoint = ModelCheckpoint(file_path, verbose=2, save_weights_only=True)
metrics = Metrics()
callbacks_list = [checkpoint, metrics]
history = model7.fit(input_train,
                     Y_train_ssp,
                     batch_size=batch_size,
                     epochs=epochs,
                     validation_data=(input_validation, Y_validation_ssp),
                     callbacks=callbacks_list,
                     verbose=2)
del model7
del history
gc.collect()
K.clear_session()

print("model8")
model8 = TextClassifier().model(embeddings_matrix, maxlen, word_index, 4)
file_path = model_dir + "model_pl_{epoch:02d}.hdf5"
checkpoint = ModelCheckpoint(file_path, verbose=2, save_weights_only=True)
metrics = Metrics()
callbacks_list = [checkpoint, metrics]
history = model8.fit(input_train,
                     Y_train_pl,  # assumed: call completed to match the sibling examples (truncated in the source)
                     batch_size=batch_size,
                     epochs=epochs,
                     validation_data=(input_validation, Y_validation_pl),
                     callbacks=callbacks_list,
                     verbose=2)

Example #4
Y_validation_owta = pd.get_dummies(validation["others_willing_to_consume_again"])[[-2, -1, 0, 1]].values
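
# Note (illustrative sketch with toy labels, not from the original script):
# pd.get_dummies one-hot encodes the label column, and indexing with
# [[-2, -1, 0, 1]] fixes the column order to the four sentiment ratings.
import pandas as pd
toy_labels = pd.Series([1, -2, 0, -1, 1])
pd.get_dummies(toy_labels)[[-2, -1, 0, 1]].values
# -> [[0,0,0,1], [1,0,0,0], [0,0,1,0], [0,1,0,0], [0,0,0,1]]
# (dtype may be bool or uint8 depending on the pandas version)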

list_tokenized_train = tokenizer.texts_to_sequences(X_train)
input_train = sequence.pad_sequences(list_tokenized_train, maxlen=maxlen)

list_tokenized_validation = tokenizer.texts_to_sequences(X_validation)
input_validation = sequence.pad_sequences(list_tokenized_validation, maxlen=maxlen)


print("model9")
model9 = TextClassifier().model(embeddings_matrix, maxlen, word_index, 4)
file_path = model_dir + "model_pce_{epoch:02d}.hdf5"
checkpoint = ModelCheckpoint(file_path, verbose=2, save_weights_only=True)
metrics = Metrics()
callbacks_list = [checkpoint, metrics]
history = model9.fit(input_train, Y_train_pce, batch_size=batch_size, epochs=epochs,
                     validation_data=(input_validation, Y_validation_pce), callbacks=callbacks_list, verbose=2)
del model9
del history
gc.collect()
K.clear_session()

print("model10")
model10 = TextClassifier().model(embeddings_matrix, maxlen, word_index, 4)
file_path = model_dir + "model_pd_{epoch:02d}.hdf5"
checkpoint = ModelCheckpoint(file_path, verbose=2, save_weights_only=True)
metrics = Metrics()
callbacks_list = [checkpoint, metrics]
history = model10.fit(input_train, Y_train_pd, batch_size=batch_size, epochs=epochs,
                      validation_data=(input_validation, Y_validation_pd), callbacks=callbacks_list, verbose=2)
del model10
del history
Example #5
input_train = sequence.pad_sequences(list_tokenized_train, maxlen=maxlen)

list_tokenized_validation = tokenizer.texts_to_sequences(X_validation)
input_validation = sequence.pad_sequences(list_tokenized_validation,
                                          maxlen=maxlen)

print("model19")
model19 = TextClassifier().model(embeddings_matrix, maxlen, word_index, 4)
file_path = model_dir + "model_ooe_{epoch:02d}.hdf5"
checkpoint = ModelCheckpoint(file_path, verbose=2, save_weights_only=True)
metrics = Metrics()
callbacks_list = [checkpoint, metrics]
history = model19.fit(input_train,
                      Y_train_ooe,
                      batch_size=batch_size,
                      epochs=epochs,
                      validation_data=(input_validation, Y_validation_ooe),
                      callbacks=callbacks_list,
                      verbose=2)
del model19
del history
gc.collect()
K.clear_session()

print("model20")
model20 = TextClassifier().model(embeddings_matrix, maxlen, word_index, 4)
file_path = model_dir + "model_owta_{epoch:02d}.hdf5"
checkpoint = ModelCheckpoint(file_path, verbose=2, save_weights_only=True)
metrics = Metrics()
callbacks_list = [checkpoint, metrics]
Example #6

input_train = sequence.pad_sequences(list_tokenized_train, maxlen=maxlen)

list_tokenized_validation = tokenizer.texts_to_sequences(X_validation)
input_validation = sequence.pad_sequences(list_tokenized_validation,
                                          maxlen=maxlen)

print("model1")
model1 = TextClassifier().model(embeddings_matrix, maxlen, word_index, 4)
file_path = model_dir + "model_ltc_{epoch:02d}.hdf5"
checkpoint = ModelCheckpoint(file_path, verbose=2, save_weights_only=True)
metrics = Metrics()
callbacks_list = [checkpoint, metrics]
history = model1.fit(input_train,
                     Y_train_ltc,
                     batch_size=batch_size,
                     epochs=epochs,
                     validation_data=(input_validation, Y_validation_ltc),
                     callbacks=callbacks_list,
                     verbose=2)
del model1
del history
gc.collect()
K.clear_session()

print("model2")
model2 = TextClassifier().model(embeddings_matrix, maxlen, word_index, 4)
file_path = model_dir + "model_ldfbd_{epoch:02d}.hdf5"
checkpoint = ModelCheckpoint(file_path, verbose=2, save_weights_only=True)
metrics = Metrics()
callbacks_list = [checkpoint, metrics]
history = model2.fit(input_train,
                     Y_train_ldfbd,  # assumed: call completed to match the sibling examples (truncated in the source)
                     batch_size=batch_size,
                     epochs=epochs,
                     validation_data=(input_validation, Y_validation_ldfbd),
                     callbacks=callbacks_list,
                     verbose=2)
Example #7
input_train = sequence.pad_sequences(list_tokenized_train, maxlen=maxlen)

list_tokenized_validation = tokenizer.texts_to_sequences(X_validation)
input_validation = sequence.pad_sequences(list_tokenized_validation,
                                          maxlen=maxlen)

print("model11")
model11 = TextClassifier().model(embeddings_matrix, maxlen, word_index, 4)
file_path = model_dir + "model_ed_{epoch:02d}.hdf5"
checkpoint = ModelCheckpoint(file_path, verbose=2, save_weights_only=True)
metrics = Metrics()
callbacks_list = [checkpoint, metrics]
history = model11.fit(input_train,
                      Y_train_ed,
                      batch_size=batch_size,
                      epochs=epochs,
                      validation_data=(input_validation, Y_validation_ed),
                      callbacks=callbacks_list,
                      verbose=2)
del model11
del history
gc.collect()
K.clear_session()

print("model12")
model12 = TextClassifier().model(embeddings_matrix, maxlen, word_index, 4)
file_path = model_dir + "model_en_{epoch:02d}.hdf5"
checkpoint = ModelCheckpoint(file_path, verbose=2, save_weights_only=True)
metrics = Metrics()
callbacks_list = [checkpoint, metrics]
Example #8
input_train = sequence.pad_sequences(list_tokenized_train, maxlen=maxlen)

list_tokenized_validation = tokenizer.texts_to_sequences(X_validation)
input_validation = sequence.pad_sequences(list_tokenized_validation,
                                          maxlen=maxlen)

print("model15")
model15 = TextClassifier().model(embeddings_matrix, maxlen, word_index, 4)
file_path = model_dir + "model_dp_{epoch:02d}.hdf5"
checkpoint = ModelCheckpoint(file_path, verbose=2, save_weights_only=True)
metrics = Metrics()
callbacks_list = [checkpoint, metrics]
history = model15.fit(input_train,
                      Y_train_dp,
                      batch_size=batch_size,
                      epochs=epochs,
                      validation_data=(input_validation, Y_validation_dp),
                      callbacks=callbacks_list,
                      verbose=2)
del model15
del history
gc.collect()
K.clear_session()

print("model16")
model16 = TextClassifier().model(embeddings_matrix, maxlen, word_index, 4)
file_path = model_dir + "model_dt_{epoch:02d}.hdf5"
checkpoint = ModelCheckpoint(file_path, verbose=2, save_weights_only=True)
metrics = Metrics()
callbacks_list = [checkpoint, metrics]
history = model16.fit(input_train,
                      Y_train_dt,  # assumed: call completed to match the sibling examples (truncated in the source)
                      batch_size=batch_size,
                      epochs=epochs,
                      validation_data=(input_validation, Y_validation_dt),
                      callbacks=callbacks_list,
                      verbose=2)
Example #9
list_tokenized_validation = tokenizer.texts_to_sequences(X_validation)
input_validation = sequence.pad_sequences(list_tokenized_validation,
                                          maxlen=maxlen)

print("model13")
model13 = TextClassifier().model(embeddings_matrix, maxlen, word_index, 4)
file_path = model_dir + "model_es_{epoch:02d}.hdf5"
checkpoint = ModelCheckpoint(file_path, verbose=2, save_weights_only=True)
metrics = Metrics()
callbacks_list = [checkpoint, metrics]

history = model13.fit(input_train,
                      Y_train_es,
                      batch_size=batch_size,
                      epochs=epochs,
                      validation_data=(input_validation, Y_validation_es),
                      callbacks=callbacks_list,
                      verbose=2)
del model13
del history
gc.collect()
K.clear_session()

print("model14")
model14 = TextClassifier().model(embeddings_matrix, maxlen, word_index, 4)
file_path = model_dir + "model_ec_{epoch:02d}.hdf5"
checkpoint = ModelCheckpoint(file_path, verbose=2, save_weights_only=True)
metrics = Metrics()
callbacks_list = [checkpoint, metrics]
Example #10
input_train = sequence.pad_sequences(list_tokenized_train, maxlen=maxlen)

list_tokenized_validation = tokenizer.texts_to_sequences(X_validation)
input_validation = sequence.pad_sequences(list_tokenized_validation,
                                          maxlen=maxlen)

print("model5")
model5 = TextClassifier().model(embeddings_matrix, maxlen, word_index, 4)
file_path = model_dir + "model_swa_{epoch:02d}.hdf5"
checkpoint = ModelCheckpoint(file_path, verbose=2, save_weights_only=True)
metrics = Metrics()
callbacks_list = [checkpoint, metrics]
history = model5.fit(input_train,
                     Y_train_swa,
                     batch_size=batch_size,
                     epochs=epochs,
                     validation_data=(input_validation, Y_validation_swa),
                     callbacks=callbacks_list,
                     verbose=2)
del model5
del history
gc.collect()
K.clear_session()

print("model6")
model6 = TextClassifier().model(embeddings_matrix, maxlen, word_index, 4)
file_path = model_dir + "model_spc_{epoch:02d}.hdf5"
checkpoint = ModelCheckpoint(file_path, verbose=2, save_weights_only=True)
metrics = Metrics()
callbacks_list = [checkpoint, metrics]
history = model6.fit(input_train,
                     Y_train_spc,  # assumed: call completed to match the sibling examples (truncated in the source)
                     batch_size=batch_size,
                     epochs=epochs,
                     validation_data=(input_validation, Y_validation_spc),
                     callbacks=callbacks_list,
                     verbose=2)
Example #11
# The snippet begins mid-loop; assuming the standard Keras pattern of iterating
# over word_index, i.e.: for word, i in word_index.items():
    # embeddings_index.get(word) returns the pretrained vector for `word`, or None.
    embedding_vector = embeddings_index.get(word)
    if embedding_vector is not None:
        embeddings_matrix[i] = embedding_vector
# For example: word_index {'the': 1} and embeddings_index {'the': vector}
# yield embeddings_matrix[1] = vector.
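# Note (assumption, illustrative names): embeddings_matrix is presumably
# pre-allocated as a zero matrix with one row per word index, the standard
# Keras setup:
# embeddings_matrix = np.zeros((len(word_index) + 1, embedding_dim))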

# Lines 86-128: one-hot encoding for all aspects in the train and validation datasets.

Y_train = data.iloc[:, 22:].values
Y_validation = validation.iloc[:, 22:].values

print("model_rcnn")
model = TextClassifier().model(embeddings_matrix, maxlen, word_index, 80)
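# Note (assumption): the 80 outputs here presumably correspond to all 20 aspects
# x 4 sentiment classes trained jointly, versus the 4-output per-aspect models above.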
file_path = model_dir + "model_rcnn_{epoch:02d}.hdf5"
checkpoint = ModelCheckpoint(file_path, verbose=1, save_weights_only=True)
metrics = Metrics()
callbacks_list = [checkpoint, metrics]
history = model.fit(input_train,
                    Y_train,
                    batch_size=batch_size,
                    epochs=epochs,
                    validation_data=(input_validation, Y_validation),
                    callbacks=callbacks_list,
                    verbose=1)
del model
del history
gc.collect()
K.clear_session()

# For the basic usage of map, see https://www.runoob.com/python/python-func-map.html (refers to line 40).
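# A one-line illustration of map (toy data, not from the original script):
assert list(map(str.strip, [" a ", "b "])) == ["a", "b"]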
input_train = sequence.pad_sequences(list_tokenized_train, maxlen=maxlen)

list_tokenized_validation = tokenizer.texts_to_sequences(X_validation)
input_validation = sequence.pad_sequences(list_tokenized_validation,
                                          maxlen=maxlen)

print("model3")
model3 = TextClassifier().model(embeddings_matrix, maxlen, word_index, 4)
file_path = model_dir + "model_letf_{epoch:02d}.hdf5"
checkpoint = ModelCheckpoint(file_path, verbose=2, save_weights_only=True)
metrics = Metrics()
callbacks_list = [checkpoint, metrics]
history = model3.fit(input_train,
                     Y_train_letf,
                     batch_size=batch_size,
                     epochs=epochs,
                     validation_data=(input_validation, Y_validation_letf),
                     callbacks=callbacks_list,
                     verbose=2)
del model3
del history
gc.collect()
K.clear_session()

print("model4")
model4 = TextClassifier().model(embeddings_matrix, maxlen, word_index, 4)
file_path = model_dir + "model_swt_{epoch:02d}.hdf5"
checkpoint = ModelCheckpoint(file_path, verbose=2, save_weights_only=True)
metrics = Metrics()
callbacks_list = [checkpoint, metrics]
history = model4.fit(input_train,
                     Y_train_swt,  # assumed: call completed to match the sibling examples (truncated in the source)
                     batch_size=batch_size,
                     epochs=epochs,
                     validation_data=(input_validation, Y_validation_swt),
                     callbacks=callbacks_list,
                     verbose=2)