# Train the bidirectional GRU sentiment-regression model on the WorldCupDraw
# dataset, logging each epoch to CSV/TensorBoard and checkpointing on val_loss.
# NOTE(review): this fragment is truncated in the source — the TensorBoard
# call's trailing kwargs are restored verbatim from the commented-out copy
# that preceded this code; the model.fit / evaluate calls that presumably
# follow are outside this view.
model_name = 'LR_TL_BI_cuDnnGRU_N196_GloVe'
util = load_data(model_name,
                 data_source='../Sentiment Analysis Data/Collected Dataset/'
                             'WorldCupDraw Preprocessed Dataset wo Dups.csv',
                 data_label='text',
                 target_label='positive_percentage')

X = util.X
Y = util.Y
batch_size = 256

# Hold out 20% for testing; the fixed seed keeps the split reproducible.
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.20,
                                                    random_state=42)
validation_size = int(len(X_test) * 0.20)  # NOTE(review): unused in this view

model = gru_model(util)
# Alternative: resume from a saved checkpoint instead of a fresh model.
# model = load_model('models/LR_TL_BI_cuDnnLSTM_N196_GloVe_e20/10-59.01-134.28.hdf5')

csv_logger = CSVLogger(util.LOG_DIR, append=True)
# Checkpoint file name encodes epoch, training loss and validation loss.
checkpoint_name = util.MODEL_DIR + '/{epoch:02d}-{loss:.2f}-{val_loss:.2f}.hdf5'
checkpoint = ModelCheckpoint(checkpoint_name, monitor='val_loss', verbose=0,
                             save_best_only=True, save_weights_only=False,
                             mode='auto', period=1)
# BUG FIX: histogram_freq is an integer number of epochs; the original 0.2
# (a float) would effectively never trigger histogram logging.
tb = TensorBoard(log_dir=util.SUMMARY_DIR, histogram_freq=1,
                 batch_size=batch_size, write_graph=True,
                 write_grads=False, write_images=False, embeddings_freq=0,
                 embeddings_layer_names=None, embeddings_metadata=None)
# Build a 2-layer bidirectional CuDNN-LSTM classifier on top of frozen
# pre-trained GloVe embeddings, and set up stratified 5-fold cross-validation.
from keras.callbacks import TensorBoard, CSVLogger, ModelCheckpoint
from keras.layers import (Bidirectional, CuDNNLSTM, Dense, Embedding,
                          SpatialDropout1D)
from keras.models import Sequential
from pandas import get_dummies
from sklearn.model_selection import StratifiedKFold

from Embedding_Utilities import load_data

util = load_data('TL_BI_cuDNNLSTM_L2_N196_GloVe_5f_final')
lstm_out = 196  # units per LSTM direction

model = Sequential()
# Frozen GloVe embedding lookup (weights are not trained further).
# BUG FIX: Keras 2 (the only Keras providing CuDNNLSTM) removed Embedding's
# `dropout` kwarg, so the original call raised TypeError.  The intended 20%
# dropout on the embedded sequence is applied with an explicit
# SpatialDropout1D layer instead.
model.add(Embedding(util.NB_WORDS, util.EMBEDDING_DIM,
                    input_length=util.TWEET_LENGTH,
                    weights=[util.embedding_matrix], trainable=False))
model.add(SpatialDropout1D(0.2))
model.add(Bidirectional(CuDNNLSTM(lstm_out, return_sequences=True)))
# NOTE(review): go_backwards=True inside Bidirectional also reverses the
# forward copy's input — confirm this is intended and not meant to be a
# plain second bidirectional layer.
model.add(Bidirectional(CuDNNLSTM(lstm_out, go_backwards=True)))
model.add(Dense(util.NUM_CLASSES, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='nadam',
              metrics=['accuracy'])
print(model.summary())

# Stratified 5-fold CV; per-fold scores are collected in cvscores.
kfold = StratifiedKFold(n_splits=5, shuffle=True,
                        random_state=util.RANDOM_SEED)
cvscores = []
# Smoke test: load a trained checkpoint and print sentiment predictions for
# two hand-written example tweets.  (Removed the commented-out per-class
# accuracy tally that previously preceded this code — dead code.)
model_to_load = "models/LR_TL_BI_cuDnnLSTM_N196_GloVe_e100/69-25.80-124.41.hdf5"

util = load_data('LR_TL_BI_cuDnnLSTM_N196_GloVe_e100',
                 data_source='../Sentiment Analysis Data/Collected Dataset/'
                             'WorldCupDraw Preprocessed Dataset wo Dups.csv',
                 data_label='text',
                 target_label='positive_percentage')

# Two ad-hoc sample tweets, tokenised/padded the same way as training data
# via the utility's convert_data.
X = asarray([
    'Germany vs. Mexico will be fun!',
    'The moment England were drawn with Belgium... #bbcfootball'
])
X = util.convert_data(X)

model = models.load_model(model_to_load)
print(model.predict(X))
# Classify every reply tweet from the WorldCupDraw replies dataset with a
# trained checkpoint and tally predicted negative vs. positive counts.
from os.path import join

import pandas as pd
from keras import models
from numpy import argmax

from Embedding_Utilities import load_data

model_name = "TL_BI_cuDNNLSTM_L2_N196_GloVe_5f_final/5.02-0.24-0.27.hdf5"
model_path = join('models/', model_name)
util = load_data(model_name)

replies_data = pd.read_csv(
    '../Sentiment Analysis Data/Collected Dataset/Replies_WorldCupDraw_Cleansed.csv'
)
# Tokenise/pad the raw reply texts the same way as training data.
replies_tweet = util.convert_data(replies_data.text.values)

model = models.load_model(model_path)
batch_size = 256
result = model.predict(replies_tweet, batch_size=batch_size, verbose=1)

prediction = []
total_count = replies_data.text.size
neg_cnt, pos_cnt = 0, 0
for tweet_number in range(total_count):
    # Hoisted: the original recomputed argmax for both the append and the test.
    label = argmax(result[tweet_number])
    prediction.append(label)
    if label == 0:
        neg_cnt += 1
    else:
        # NOTE(review): the source is truncated right after `else:`; the
        # positive tally is restored symmetrically to the negative branch —
        # confirm against the original file.
        pos_cnt += 1