# Scale every feature into [0, 1] once up front; the containers below
# accumulate per-feature ablation results and model accuracies.
features_minmax = min_max_scaler.fit_transform(features)
# Idiom fix: use literals instead of dict()/list() constructor calls.
features_analysis_result = {}
accuracies = []

# Feature-ablation loop: drop one feature at a time, rescale what remains,
# retrain the model, and compare quality to gauge that feature's importance.
# NOTE(review): this scraped snippet is truncated — the stacked_LSTM(...)
# call at the bottom is cut off mid-argument-list.
for i in range(len(feature_names)):
    cur_features = features
    # Remove the i-th feature column. Assumes `features` is a pandas
    # DataFrame (`.drop(..., axis=1)` returns a copy) — TODO confirm.
    cur_features = cur_features.drop(feature_names[i], axis=1)
    # Fresh scaler each iteration so min/max stats reflect only the
    # remaining columns.
    min_max_scaler = preprocessing.MinMaxScaler()
    cur_features = min_max_scaler.fit_transform(cur_features)
    cur_feature_name = feature_names[i]
    ### split data into training set and label set
    X_train, X_test, y_train, y_test = train_test_split(cur_features,
                                                        onehotlabels,
                                                        test_size=0.4,
                                                        random_state=42)
    # Collapse one-hot test labels to flat class labels for metrics
    # (presumably — transformResult is defined elsewhere; verify).
    transformed_y = deep_learning_models.transformResult(y_test)
    #X_train, X_test, y_train, y_test = train_test_split(features_minmax, labels, test_size=0.4, random_state=42)

    ### adjust the dataset dimension
    # reshape X to be [samples, time steps, features]
    X_train_LSTM = np.reshape(X_train, (X_train.shape[0], 1, X_train.shape[1]))
    X_test_LSTM = np.reshape(X_test, (X_test.shape[0], 1, X_test.shape[1]))

    ### create the deep learning models
    epochs = 15
    batch_size = 195  #195 best now #190 # 100 # 250
    dropoutRate = 0.2

    ## stacked LSTM
    model, hist = deep_learning_models.stacked_LSTM(X_train_LSTM, y_train,
                                                    X_test_LSTM, y_test,
# ---- Example #2 (boundary marker left over from the scraped source) ----
### create the deep learning models
# Hyperparameters shared by the models in this snippet.
epochs = 15
batch_size = 195  #195 best now #190 # 100 # 250
dropoutRate = 0.2
"""
epochs = 200
batch_size = 20
dropoutRate = 0.3
"""
# Class indices of the one-hot test labels (argmax over the class axis).
y_test_re = np.argmax(y_test, axis=1)
# Per-model metric accumulators.
accuracies = list()
precisions = list()
Fs = list()
TNRs = list()
recalls = list()
# Flat class-label lists derived from the one-hot matrices
# (transformResult is defined elsewhere — presumably an argmax-style
# collapse; verify against deep_learning_models).
transformed_y = deep_learning_models.transformResult(y_test)
transformed_y_train = deep_learning_models.transformResult(
    y_train)  ### one-dimensional list

### Bidirectional LSTM
# Train and evaluate the bidirectional LSTM; wall-clock timing code is
# left disabled.
#start_time0 = time.clock()
model0, hist0 = deep_learning_models.Bidirectional_LSTM(
    X_train_LSTM, y_train, X_test_LSTM, y_test, batch_size, epochs)
prediction0 = model0.predict(X_test_LSTM)
transformed_pre0 = deep_learning_models.transformResult(prediction0)
#end_time0 = time.clock()
#Bi_LSTM_performance = end_time0 - start_time0
prediction0_re = np.argmax(prediction0, axis=1)
# Accuracy / precision / recall / F1 / F-beta versus the true labels.
Bidirectional_LSTM_accuracy, bi_precision, bi_recall, bi_F1, bi_fbeta = deep_learning_models.getAccuracyMulti(
    transformed_pre0, transformed_y)
keys = hist0.history.keys()
# 50/50 train/test split for the multi-label classification experiments
# below (X, Y, random_state are defined earlier in the file).
X_train, X_test, Y_train, Y_test = train_test_split(X,
                                                    Y,
                                                    test_size=.5,
                                                    random_state=random_state)

# We use OneVsRestClassifier for multi-label prediction
# NOTE(review): mid-file import — scrape artifact; would normally live at
# the top of the file.
from sklearn.multiclass import OneVsRestClassifier

# Run classifier
"""
classifier = OneVsRestClassifier(svm.LinearSVC(random_state=random_state))
classifier.fit(X_train, Y_train)
y_score = classifier.decision_function(X_test)
"""
### naive bayes classifier
# Flat class labels for the training set (transformResult defined
# elsewhere; presumably collapses one-hot rows — verify).
y_train_list = deep_learning_models.transformResult(Y_train)
"""
gnb = GaussianNB().fit(X_train, y_train_list)
print("Y train")
print(Y_train.shape)
y_score = gnb.predict_proba(X_test)
plot_average_precision(Y_test, y_score)
"""
"""
### Decision tree #####
dtree_model = DecisionTreeClassifier(max_depth = 2).fit(X_train, y_train_list)
dtree_predictions = dtree_model.predict(X_test)
y_score_dt = dtree_model.predict_proba(X_test)
plot_average_precision(Y_test, y_score_dt)
"""
# NOTE(review): the triple-quote below opens an UNTERMINATED string — the
# scraped snippet is cut off here; the closing quotes lie outside this view.
"""