# k_cross training
from os import path

f = open(path.join(path.dirname(__file__), '..', 'record', 'temp.txt'), 'w')
# for n_epochs in [20, 30, 40, 50, 60, 70, 75, 80, 90, 100, 150, 200, 250, 300]:
for n_epochs in [650, 700, 750, 800]:
    score_list_5chunk = []
    confusion_matrix_5chunk = []
    for train_chunk_number in range(5):
        train_chunk_number = train_chunk_number + 1  # folds are 1-indexed
        train, test = k_cross(dataset, train_chunk_number)
        # Column 0 is the label; the remaining columns are fixed-offset
        # feature blocks (text, sentiment, LSTM, dense, decomposed dense).
        X_train_text = train[:, 3:103]
        X_test_text = test[:, 3:103]
        X_train_senti = train[:, 103:109]
        X_test_senti = test[:, 103:109]
        X_train_lstm = train[:, 109:237]
        X_test_lstm = test[:, 109:237]
        X_train_dense = train[:, 237:287]
        X_test_dense = test[:, 237:287]
        X_train_decomdense = train[:, 287:307]
        X_test_decomdense = test[:, 287:307]
        Y_train = train[:, 0]
        Y_test = test[:, 0]
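# NOTE: `k_cross` is called above but not defined in this snippet. Below is a
# minimal sketch of what it is assumed to do, given the call sites: split the
# data into 5 contiguous chunks along axis 0 and hold out chunk `chunk_number`
# (1-indexed) as the test fold. The 3-D variant `k_cross_3` used in the later
# snippets is assumed to do the same split on (samples, timesteps, features)
# tensors. This is an assumption, not the original implementation.
import numpy as np

def k_cross(data, chunk_number, n_chunks=5):
    chunks = np.array_split(data, n_chunks)  # n_chunks near-equal pieces
    test = chunks[chunk_number - 1]          # held-out fold (1-indexed)
    train = np.concatenate(
        [c for i, c in enumerate(chunks) if i != chunk_number - 1])
    return train, test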
for n_epochs in [15, 20, 30]:
    score_list_5chunk = []
    confusion_matrix_5chunk = []
    for train_chunk_number in range(5):
        train_chunk_number = train_chunk_number + 1
        # train_text, test_text = k_cross(dataset, train_chunk_number)
        X_train_lstm, X_test_lstm = k_cross_3(dynamic_lstm_dataset, train_chunk_number)
        # X_train_text = train_text[:, 3: 103]
        # X_test_text = test_text[:, 3: 103]
        # Y_train = train_text[:, label_index]
        # Y_test = test_text[:, label_index]
        train_text, test_text = k_cross(dataset, train_chunk_number)
        Y_train, Y_test = k_cross(label, train_chunk_number)
        X_train_cnn, X_test_cnn = k_cross_3(dynamic_lstm_dataset, train_chunk_number)
        # Append a channel axis of size 1 so the 3-D sequences fit a Conv2D input.
        X_train_cnn = X_train_cnn.reshape(
            (X_train_cnn.shape[0], X_train_cnn.shape[1], X_train_cnn.shape[2], 1))
        X_test_cnn = X_test_cnn.reshape(
            (X_test_cnn.shape[0], X_test_cnn.shape[1], X_test_cnn.shape[2], 1))
        X_train_text = train_text[:, 3:103]
        X_test_text = test_text[:, 3:103]
        # print(X_train_lstm.shape)
        encoder = LabelEncoder()
        encoder_label_train = encoder.fit_transform(Y_train)
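# The two reshape calls above only append a trailing channel axis of size 1.
# A quick sanity check of the equivalent (and shorter) NumPy idiom, using a
# hypothetical tensor shape:
import numpy as np

x = np.zeros((32, 10, 128))                  # (samples, timesteps, features)
a = x.reshape((x.shape[0], x.shape[1], x.shape[2], 1))
b = np.expand_dims(x, axis=-1)               # same result, one call
assert a.shape == b.shape == (32, 10, 128, 1)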
print(dynamic_lstm_dataset.shape)

from keras.models import Model, Sequential
from keras.layers import Dense, Activation, Dropout, Embedding, Input
from keras.layers import LSTM

f = open(path.join(path.dirname(__file__), '..', 'record', 'temp.txt'), 'w')
for n_epochs in [20, 30, 40, 50, 60, 70, 75, 80, 90, 100, 150, 200, 250, 300]:
    score_list_5chunk = []
    confusion_matrix_5chunk = []
    for train_chunk_number in range(5):
        train_chunk_number = train_chunk_number + 1
        train_text, test_text = k_cross(dataset, train_chunk_number)
        X_train_lstm, X_test_lstm = k_cross_3(dynamic_lstm_dataset, train_chunk_number)
        X_train_text = train_text[:, 3:103]
        X_test_text = test_text[:, 3:103]
        Y_train = train_text[:, label_index]
        Y_test = test_text[:, label_index]
        # print(X_train_lstm.shape)
        # One-hot encode the labels for a softmax output layer.
        encoder = LabelEncoder()
        encoder_label_train = encoder.fit_transform(Y_train)
        dummy_Y_train = np_utils.to_categorical(encoder_label_train)
        # Use transform (not fit_transform) on the test labels so the
        # class-to-integer mapping learned on the training fold is reused.
        encoder_label_test = encoder.transform(Y_test)
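# A quick illustration of the label pipeline above, with hypothetical
# two-class labels: LabelEncoder maps class names to integers, and
# np_utils.to_categorical turns those integers into one-hot rows for a
# softmax output.
from sklearn.preprocessing import LabelEncoder
from keras.utils import np_utils

labels = ['class_a', 'class_b', 'class_b', 'class_a']  # hypothetical labels
enc = LabelEncoder()
ints = enc.fit_transform(labels)        # array([0, 1, 1, 0])
one_hot = np_utils.to_categorical(ints) # shape (4, 2), rows like [1., 0.]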
f = open(path.join(path.dirname(__file__), '..', 'record', 'temp.txt'), 'w')
# for n_epochs in [20,30,40,50,60,70,75,80,90,100,150,200,250,300]:
for n_epochs in [15, 20, 30]:
    score_list_5chunk = []
    confusion_matrix_5chunk = []
    # Stop training once validation loss has not improved for 12 epochs.
    early_stopping = EarlyStopping(monitor='val_loss', patience=12, verbose=0, mode='min')
    for train_chunk_number in range(5):
        train_chunk_number = train_chunk_number + 1
        Y_train, Y_test = k_cross(label, train_chunk_number)
        X_train, X_test = k_cross_3(user_sequence, train_chunk_number)
        print(X_train.shape)
        # Append a channel axis of size 1 so the sequences fit a Conv2D input.
        X_train = X_train.reshape(
            (X_train.shape[0], X_train.shape[1], X_train.shape[2], 1))
        X_test = X_test.reshape(
            (X_test.shape[0], X_test.shape[1], X_test.shape[2], 1))
        encoder = LabelEncoder()
        encoder_label_train = encoder.fit_transform(Y_train)
        dummy_Y_train = np_utils.to_categorical(encoder_label_train)
        encoder_label_test = encoder.transform(Y_test)  # reuse the training fit
        dummy_Y_test = np_utils.to_categorical(encoder_label_test)  # binary classification
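# The snippet above stops before the model definition. A minimal sketch of a
# CNN that consumes the prepared (samples, steps, features, 1) input and the
# one-hot labels, reusing the EarlyStopping callback defined above. The layer
# sizes and kernel shapes are illustrative assumptions, not the original
# architecture; it assumes X_train/X_test, dummy_Y_train/dummy_Y_test,
# n_epochs, and early_stopping are in scope.
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense

model = Sequential()
model.add(Conv2D(32, (3, 3), activation='relu',
                 input_shape=X_train.shape[1:]))
model.add(MaxPooling2D((2, 2)))
model.add(Flatten())
model.add(Dense(dummy_Y_train.shape[1], activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam',
              metrics=['accuracy'])
model.fit(X_train, dummy_Y_train, epochs=n_epochs,
          validation_data=(X_test, dummy_Y_test),
          callbacks=[early_stopping], verbose=0)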