def __init__(self,
             batch_size,
             training_dataset_folder_name,
             total_epochs,
             epochs_with_same_data=5,
             folders_at_the_same_time=20,
             to_avoid=None,
             enable_telegram_bot=True,
             chat_id="undefined"):
    # Avoid the mutable-default-argument pitfall: never share one list across calls.
    to_avoid = [] if to_avoid is None else to_avoid
    self.x, self.y, _ = LoadData.GetData(
        training_dataset_folder_name,
        limit_value=folders_at_the_same_time,
        to_avoid=to_avoid)
    # One-hot encode the two classes and scale the inputs to [0, 1].
    self.y = np_utils.to_categorical(self.y, 2)
    self.x = self.x.astype('float32')
    self.x /= np.max(self.x)

    self.x_next_epoch, self.y_next_epoch = self.x, self.y
    self.epoch = 0
    self.batch_size = batch_size
    self.epochs_with_same_data = epochs_with_same_data
    self.training_dataset_folder_name = training_dataset_folder_name
    self.folders_at_the_same_time = folders_at_the_same_time
    self.to_avoid = to_avoid
    self.steps_per_epoch = 0
    self.t = None  # background thread that prefetches the next chunk of data
    self.total_epochs = total_epochs
    self.enable_telegram_bot = enable_telegram_bot
    self.chat_id = chat_id
def fetch_data_for_next_couple_of_epochs(self):
    # Load a fresh subset of folders to serve the next few epochs.
    self.x_next_epoch, self.y_next_epoch, _ = LoadData.GetData(
        self.training_dataset_folder_name,
        limit_value=self.folders_at_the_same_time,
        to_avoid=self.to_avoid)
    self.y_next_epoch = np_utils.to_categorical(self.y_next_epoch, 2)
    self.x_next_epoch = self.x_next_epoch.astype('float32')
    self.x_next_epoch /= np.max(self.x_next_epoch)
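The unused `self.t` attribute suggests the fetch is meant to run on a background thread. A minimal sketch of how such a prefetch could be driven, assuming a Keras-style `on_epoch_end` hook (the hook name and swap policy are assumptions, not the author's code):

import threading

def on_epoch_end(self):
    # Hypothetical driver: every `epochs_with_same_data` epochs, swap in the
    # prefetched arrays and start loading the next chunk in the background.
    self.epoch += 1
    if self.epoch % self.epochs_with_same_data == 0:
        if self.t is not None:
            self.t.join()  # make sure the previous prefetch has finished
        self.x, self.y = self.x_next_epoch, self.y_next_epoch
        self.t = threading.Thread(target=self.fetch_data_for_next_couple_of_epochs)
        self.t.start()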
Example #3
def load_data(self, folders, folders_to_load=15, to_avoid=None):
    # print("\nstarted the fetch of data for the next epoch in parallel")
    to_avoid = [] if to_avoid is None else to_avoid  # avoid a shared mutable default
    self.x_next_epoch, self.y_next_epoch, loaded_folders_list = LoadData.GetData(
        folders, limit_value=folders_to_load, to_avoid=to_avoid)
    self.y_next_epoch = np_utils.to_categorical(self.y_next_epoch, 2)
    self.x_next_epoch = self.x_next_epoch.astype('float32')
    self.x_next_epoch /= np.max(self.x_next_epoch)
    # print("\nended the fetch of data for the next epoch in parallel")
    return self.x_next_epoch, self.y_next_epoch, loaded_folders_list
Example #4
def main():

    args = Parser.parse_command_line()

    samples, outdir = ld.LoadData(**args)
    dataset = ld.GetData(samples, outdir, **args)

    # Create a directory for the trained pickle file
    pkldir = join(outdir, 'trained_pickle')
    os.makedirs(pkldir, exist_ok=True)

    if args['pkl_file'] is not None:
        tag = args['pkl_file'].replace('%s/trained' % pkldir, '').replace('.pkl', '')
        ml = MLA.MLA(tag, dataset, **args)
        clf = joblib.load(args['pkl_file'])
        print("\nTrained forest is loaded from %s" % args['pkl_file'])
        print(clf)
    else:
        ml, clf = training_go_or_stop(dataset, pkldir, **args)

    evaluation_go_or_stop(ml, clf, samples, outdir, **args)
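The `else` branch presumably ends with the fitted classifier being written into `pkldir`, so a later run can reload it through `args['pkl_file']`. A hedged sketch of that save step using joblib's standard dump API (the helper name and filename convention are assumptions):

import joblib

def save_trained_model(clf, pkldir, tag):
    # Hypothetical counterpart of the load branch above: persist the
    # fitted estimator as trained<tag>.pkl inside pkldir.
    path = join(pkldir, 'trained%s.pkl' % tag)
    joblib.dump(clf, path)
    return path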
Example #5
import numpy as np
import random
import tensorflow as tf
import datetime
import LoadData
#Get Data
Sequence_Length, Char_Size, char2id, id2char, X, Y = LoadData.GetData()

#Define Training Parameters
Batch_Size = 512
Steps = 10
Eta = 10.0
Log_Interval = 10
Test_Interval = 10
Hidden_Nodes = 1024
Test_Start = 'I am thinking that'
Checkpoint_Directory = 'ckpt'

#Create a checkpoint directory
if tf.gfile.Exists(Checkpoint_Directory):
    tf.gfile.DeleteRecursively(Checkpoint_Directory)
tf.gfile.MakeDirs(Checkpoint_Directory)

print('training data size:', len(X))
print('approximate steps per epoch:', int(len(X) / Batch_Size))


#Given a probability of each character, return a likely character, one-hot encoded
def Sample(Prediction):
    r = random.uniform(0, 1)
    s = 0
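The snippet breaks off inside `Sample`. A minimal completion under the usual assumption that `Prediction` is a probability vector over the `Char_Size` characters (standard roulette-wheel sampling; not necessarily the author's exact body):

def Sample(Prediction):
    # Walk the cumulative distribution until it passes a uniform draw,
    # then return the chosen character as a one-hot vector.
    r = random.uniform(0, 1)
    s = 0
    char_id = len(Prediction) - 1
    for i, p in enumerate(Prediction):
        s += p
        if s >= r:
            char_id = i
            break
    one_hot = np.zeros(Char_Size)
    one_hot[char_id] = 1.0
    return one_hot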
Example #6
try:
    import LoadData as covid
except ImportError:
    # Fall back to the fully qualified module path when run outside the package.
    import Documents.GitHub.Cov2020.Covid19.LoadData as covid

DATA = covid.GetData()
w = covid.Covid(DATA.data)
w.createEurope()
w.plotAreas("Europe")
Example #7
import Spam
import LoadData

TrainData, TestData = LoadData.GetData()

classifier = Spam.SpamClassifier(TrainData)
classifier.Train()    
result = classifier.Predict(TestData['message'])
classifier.Accuracy(TestData['label'], result)
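From the calls above one can infer the classifier's small interface: `Train()` fits on the training split, `Predict` maps raw messages to labels, and `Accuracy` scores them. A hedged sketch of what the scoring step likely computes (illustrative only, not the Spam module's actual code):

def Accuracy(true_labels, predicted_labels):
    # Fraction of test messages whose predicted label matches the ground truth.
    correct = sum(1 for t, p in zip(true_labels, predicted_labels) if t == p)
    print("accuracy: %.3f" % (correct / len(true_labels)))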
Example #8
def Model(Label, Parameters=None):
    global filepath, filename, fixed_seed_num, sequence_window, number_class, hidden_units, input_dim, learning_rate, epoch, is_multi_scale, training_level, cross_cv, is_add_noise, noise_ratio
    # A dict default (not a mutable []) so the string keys below can be looked up;
    # missing keys fall back to the module-level globals.
    if Parameters is None:
        Parameters = {}
    try:
        filepath = Parameters["filepath"]
        filename = Parameters["filename"]
        sequence_window = Parameters["sequence_window"]
        number_class = Parameters["number_class"]
        hidden_units = Parameters["hidden_units"]
        input_dim = Parameters["input_dim"]
        learning_rate = Parameters["learning_rate"]
        epoch = Parameters["epoch"]
        is_multi_scale = Parameters["is_multi_scale"]
        training_level = Parameters["training_level"]
        cross_cv = Parameters["cross_cv"]
        fixed_seed_num = Parameters["fixed_seed_num"]
        is_add_noise = Parameters["is_add_noise"]
        noise_ratio = Parameters["noise_ratio"]
    except KeyError:
        pass


    result_list_dict = defaultdict(list)
    evaluation_list = ["ACCURACY","F1_SCORE","AUC","G_MEAN"]
    for each in evaluation_list:
        result_list_dict[each] = []
    np.random.seed(fixed_seed_num)  # for reproducibility
    #num_selected_features = 30
    #num_selected_features = 25#AS leak tab=0
    #num_selected_features = 32#Slammer tab=0
    num_selected_features = 33  # Nimda tab=1
    for tab_cv in range(cross_cv):

        if tab_cv != 0: continue
        epoch_training_loss_list = []
        epoch_val_loss_list = []
        #print(is_multi_scale)

        #using MLP to train
        if Label == "SVM":
            x_train, y_train, y_train0, x_test, y_test, y_test0 = LoadData.GetData_WithoutS(is_add_noise,noise_ratio,filepath, filename,
                                                                                            sequence_window, tab_cv,
                                                                                            cross_cv,
                                                                                            Multi_Scale=is_multi_scale,
                                                                                            Wave_Let_Scale=training_level,
                                                                                            Normalize=0)

            print(Label+" is running..............................................")
            y_train = y_train0
            clf = svm.SVC(kernel="rbf", gamma=0.00001, C=100000,probability=True)
            print(x_train.shape)
            clf.fit(x_train, y_train)
            result = clf.predict_proba(x_test)
            #return Evaluation.Evaluation(y_test, result)
            #results = Evaluation.Evaluation(y_test, result)

        elif Label == "SVMF":
            x_train, y_train, y_train0, x_test, y_test, y_test0 = LoadData.GetData_WithoutS(is_add_noise,noise_ratio,filepath, filename,
                                                                                            sequence_window, tab_cv,
                                                                                            cross_cv,
                                                                                            Multi_Scale=is_multi_scale,
                                                                                            Wave_Let_Scale=training_level,
                                                                                            Normalize=5)

            print(Label+" is running..............................................")
            clf = svm.SVC(kernel="rbf", gamma=0.00001, C=100000,probability=True)
            print(x_train.shape)
            #x_train_new = SelectKBest(f_classif, k=num_selected_features).fit_transform(x_train, y_train0)
            #x_test_new = SelectKBest(f_classif, k=num_selected_features).fit_transform(x_test, y_test0)

            clf.fit(x_train, y_train0)
            result = clf.predict_proba(x_test)
            #return Evaluation.Evaluation(y_test, result)
            #results = Evaluation.Evaluation(y_test, result)
        elif Label == "SVMW":
            x_train, y_train, y_train0, x_test, y_test, y_test0 = LoadData.GetData_WithoutS(is_add_noise,noise_ratio,filepath, filename,
                                                                                            sequence_window, tab_cv,
                                                                                            cross_cv,
                                                                                            Multi_Scale=is_multi_scale,
                                                                                            Wave_Let_Scale=training_level,
                                                                                            Normalize=6)

            print(Label + " is running..............................................")
            #SVR(kernel="linear") = svm.SVC(kernel="rbf", gamma=0.00001, C=100000, probability=True)
            estimator = svm.SVC(kernel="linear",probability=True)
            selector = RFE(estimator, n_features_to_select=num_selected_features, step=1)
            selector = selector.fit(x_train, y_train0)

            result = selector.predict_proba(x_test)
            # return Evaluation.Evaluation(y_test, result)
            # results = Evaluation.Evaluation(y_test, result)
        elif Label == "NBF":

            x_train, y_train, y_train0, x_test, y_test, y_test0 = LoadData.GetData_WithoutS(is_add_noise,noise_ratio,filepath, filename,
                                                                                            sequence_window, tab_cv,
                                                                                            cross_cv,
                                                                                            Multi_Scale=is_multi_scale,
                                                                                            Wave_Let_Scale=training_level,
                                                                                            Normalize=10)

            print(Label + " is running..............................................")
            clf = MultinomialNB()
            clf.fit(x_train, y_train0)
            result = clf.predict_proba(x_test)


        elif Label == "NBW":
            x_train, y_train, y_train0, x_test, y_test, y_test0 = LoadData.GetData_WithoutS(is_add_noise,noise_ratio,filepath, filename,
                                                                                            sequence_window, tab_cv,
                                                                                            cross_cv,
                                                                                            Multi_Scale=is_multi_scale,
                                                                                            Wave_Let_Scale=training_level,
                                                                                            Normalize=11)

            print(Label + " is running..............................................")
            #SVR(kernel="linear") = svm.SVC(kernel="rbf", gamma=0.00001, C=100000, probability=True)
            estimator = MultinomialNB()
            selector = RFE(estimator, n_features_to_select=num_selected_features, step=1)
            selector = selector.fit(x_train, y_train0)

            result = selector.predict_proba(x_test)
            # return Evaluation.Evaluation(y_test, result)
            # results = Evaluation.Evaluation(y_test, result)
        elif Label == "NB":
            x_train, y_train, y_train0, x_test, y_test, y_test0 = LoadData.GetData_WithoutS(is_add_noise,noise_ratio,filepath, filename,
                                                                                            sequence_window, tab_cv,
                                                                                            cross_cv,
                                                                                            Multi_Scale=is_multi_scale,
                                                                                            Wave_Let_Scale=training_level,
                                                                                            Normalize=1)

            print(Label+" is running..............................................")
            y_train = y_train0
            clf = MultinomialNB()
            clf.fit(x_train, y_train)
            result = clf.predict_proba(x_test)

            #return Evaluation.Evaluation(y_test, result)
            #results = Evaluation.Evaluation(y_test, result)

        elif Label == "DT":
            x_train, y_train, y_train0, x_test, y_test, y_test0 = LoadData.GetData_WithoutS(is_add_noise,noise_ratio,filepath, filename,
                                                                                            sequence_window, tab_cv,
                                                                                            cross_cv,
                                                                                            Multi_Scale=is_multi_scale,
                                                                                            Wave_Let_Scale=training_level,
                                                                                            Normalize=2)

            print(Label+" is running.............................................."+str(x_train.shape))
            y_train = y_train0
            clf = tree.DecisionTreeClassifier()
            clf.fit(x_train, y_train)
            result = clf.predict_proba(x_test)

            #return Evaluation.Evaluation(y_test, result)
            #results = Evaluation.Evaluation(y_test, result)
        elif Label == "Ada.Boost":
            x_train, y_train, y_train0, x_test, y_test, y_test0 = LoadData.GetData_WithoutS(is_add_noise,noise_ratio,filepath, filename,
                                                                                            sequence_window, tab_cv,
                                                                                            cross_cv,
                                                                                            Multi_Scale=is_multi_scale,
                                                                                            Wave_Let_Scale=training_level,
                                                                                            Normalize=0)

            print(Label+" is running.............................................."+str(x_train.shape))
            y_train = y_train0
            #clf = AdaBoostClassifier(n_estimators=10) #Nimda tab=1
            clf = AdaBoostClassifier(n_estimators=10)

            clf.fit(x_train, y_train)
            result = clf.predict_proba(x_test)

            #return Evaluation.Evaluation(y_test, result)
            #results = Evaluation.Evaluation(y_test, result)
        elif Label == "MLP":
            x_train, y_train, y_train0, x_test, y_test, y_test0 = LoadData.GetData_WithoutS(is_add_noise,noise_ratio,filepath, filename,
                                                                                            sequence_window, tab_cv,
                                                                                            cross_cv,
                                                                                            Multi_Scale=is_multi_scale,
                                                                                            Wave_Let_Scale=training_level,
                                                                                            Normalize=0)

            print(Label+" is running..............................................")
            batch_size = len(y_train)
            start = time.clock()
            model = Sequential()
            model.add(Dense(hidden_units, activation="relu", input_dim=33))

            model.add(Dense(output_dim=number_class))
            model.add(Activation("sigmoid"))
            # model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
            model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])

            model.fit(x_train, y_train, batch_size=batch_size, nb_epoch=epoch)
            #result = model.predict(X_Testing, batch_size=batch_size)
            result = model.predict(x_test)
            end = time.clock()
            print("The Time For MLP is " + str(end - start))

            #return Evaluation.Evaluation(y_test, result)
            #results = Evaluation.Evaluation(y_test, result)

        #elif Label == "SVM-S":
            #x_train, y_train, y_train0, x_test, y_test, y_test0 = LoadData.GetData('Attention',filepath,filename,sequence_window,tab_cv,cross_cv)
            #x_train,y_train = Manipulation(x_train,y_train0,sequence_window)
            #x_test, y_test = Manipulation(x_test, y_test0, sequence_window)
            #clf = svm.SVC(kernel="rbf")
            #clf.fit(x_train, y_train)
            #result = clf.predict(x_test)
            #results = Evaluation.Evaluation_WithoutS(y_test, result)
        elif Label == "RNN":
            print(Label+" is running..............................................")
            start = time.clock()
            x_train_multi_list, x_train, y_train, x_testing_multi_list, x_test, y_test = LoadData.GetData(is_add_noise,noise_ratio,'Attention',
                                                                                                          filepath,
                                                                                                          filename,
                                                                                                          sequence_window,
                                                                                                          tab_cv,
                                                                                                          cross_cv,
                                                                                                          Multi_Scale=is_multi_scale,
                                                                                                          Wave_Let_Scale=training_level)

            batch_size = len(y_train)
            rnn_object = SimpleRNN(hidden_units, input_length=len(x_train[0]), input_dim=input_dim)
            model = Sequential()

            model.add(rnn_object)  # X.shape is (samples, timesteps, dimension)
            #model.add(Dense(30, activation="relu"))
            #model.add(Dropout(0.2))
            model.add(Dense(30, activation="sigmoid"))
            #model.add(Dropout(0.3))
            # model.add(Dense(5,activation="tanh"))

            model.add(Dense(output_dim=number_class))
            model.add(Activation("sigmoid"))
            # model.add(Activation("softmax"))

            # model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])
            model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])

            model.fit(x_train, y_train, batch_size=batch_size, nb_epoch=epoch)

            #result = model.predict(X_Testing, batch_size=batch_size)

            result = model.predict(x_test)

            #return Evaluation.Evaluation(y_test, result)
            #results = Evaluation.Evaluation(y_test, result)

            end = time.clock()
            print("The Time For RNN is " + str(end - start))

            # print(result)
        elif Label == "LSTM":
            print(Label+" is running..............................................")
            start = time.clock()
            x_train_multi_list, x_train, y_train, x_testing_multi_list, x_test, y_test = LoadData.GetData(is_add_noise,noise_ratio,'Attention',filepath,
                                                                                                          filename,
                                                                                                          sequence_window,
                                                                                                          tab_cv,
                                                                                                          cross_cv,
                                                                                                          Multi_Scale=is_multi_scale,
                                                                                                          Wave_Let_Scale=training_level)

            batch_size = len(y_train)

            lstm_object = LSTM(hidden_units, input_length=len(x_train[0]), input_dim=input_dim)
            model = Sequential()

            model.add(lstm_object)  # X.shape is (samples, timesteps, dimension)
            # model.add(LSTM(lstm_size,return_sequences=True,input_shape=(len(X_Training[0]),33)))
            # model.add(LSTM(100,return_sequences=True))
            # model.add(Dense(10, activation="tanh"))
            # model.add(Dense(5,activation="tanh"))
            model.add(Dense(30, activation="relu"))
            #model.add(Dropout(0.2))

            #model.add(Dense(30, activation="sigmoid"))
            #model.add(Dropout(0.3))
            # model.add(Dense(5,activation="tanh"))

            model.add(Dense(output_dim=number_class))
            model.add(Activation("sigmoid"))
            #model.add(Activation("softmax"))

            # model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])
            model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])

            model.fit(x_train, y_train, batch_size=batch_size, nb_epoch=epoch)

            #result = model.predict(X_Testing, batch_size=batch_size)

            result = model.predict(x_test)

            end = time.clock()
            print("The Time For LSTM is " + str(end - start))

        if len(Parameters) > 0:
            return Evaluation.Evaluation(y_test, result)#Plotting AUC

        results = Evaluation.Evaluation(y_test, result)# Computing ACCURACY,F1-score,..,etc
        print(results)
        y_test2 = np.array(Evaluation.ReverseEncoder(y_test))
        result2 = np.array(Evaluation.ReverseEncoder(result))
        print("---------------------------1111111111111111")
        with open("StatFalseAlarm_"+filename+"_True.txt","w") as fout:
            for tab in range(len(y_test2)):
                fout.write(str(int(y_test2[tab]))+'\n')
        with open("StatFalseAlarm_"+filename+"_"+Label+"_"+"_Predict.txt","w") as fout:
            for tab in range(len(result2)):
                fout.write(str(int(result2[tab]))+'\n')
        print(result2.shape)
        print("---------------------------22222222222222222")

        for each_eval, each_result in results.items():
            result_list_dict[each_eval].append(each_result)

    for eachk, eachv in result_list_dict.items():
        result_list_dict[eachk] = np.average(eachv)
    #print(result_list_dict)
    if not is_add_noise:
        with open(os.path.join(os.getcwd(), "Comparison_Log_" + filename + ".txt"), "a") as fout:
            outfileline = Label+":__"
            fout.write(outfileline)
            for eachk,eachv in result_list_dict.items():
                fout.write(eachk+": "+str(round(eachv,3))+",\t")
            fout.write('\n')
    else:
        with open(os.path.join(os.getcwd(),"Comparison_Log_Adding_Noise_"+filename+".txt"),"a")as fout:
            outfileline = Label+":__"+"Noise_Ratio_:"+str(noise_ratio)
            fout.write(outfileline)
            for eachk,eachv in result_list_dict.items():
                fout.write(eachk+": "+str(round(eachv,3))+",\t")
            fout.write('\n')

    return results
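The MLP branch above (and the RNN/LSTM branches) use Keras 1-era arguments such as `nb_epoch` and `output_dim`, plus the long-removed `time.clock()`. A minimal modern tf.keras equivalent of the MLP branch, for readers on a current stack (a sketch that mirrors the snippet's variable names, not the author's code):

import time
import tensorflow as tf

def mlp_branch(x_train, y_train, x_test, hidden_units, number_class, epoch):
    # Same architecture as the "MLP" branch: one ReLU hidden layer,
    # a sigmoid output layer, Adam + binary cross-entropy, full-batch training.
    start = time.time()
    model = tf.keras.Sequential([
        tf.keras.layers.Dense(hidden_units, activation="relu",
                              input_shape=(x_train.shape[1],)),
        tf.keras.layers.Dense(number_class, activation="sigmoid"),
    ])
    model.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])
    model.fit(x_train, y_train, batch_size=len(y_train), epochs=epoch)
    result = model.predict(x_test)
    print("The Time For MLP is " + str(time.time() - start))
    return result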
Example #9
def Model(each_case, Label, Parameters=None):
    global filepath, filename, fixed_seed_num, sequence_window, number_class, hidden_units, input_dim, learning_rate, epoch, is_multi_scale, training_level, cross_cv, wave_type, is_add_noise, noise_ratio, pooling_type, corss_val_label

    # A dict default (not a mutable []); missing keys fall back to the globals.
    if Parameters is None:
        Parameters = {}
    try:
        filepath = Parameters["filepath"]
        filename = Parameters["filename"]
        sequence_window = Parameters["sequence_window"]
        number_class = Parameters["number_class"]
        hidden_units = Parameters["hidden_units"]
        input_dim = Parameters["input_dim"]
        learning_rate = Parameters["learning_rate"]
        epoch = Parameters["epoch"]
        training_level = Parameters["training_level"]
        cross_cv = Parameters["cross_cv"]
        fixed_seed_num = Parameters["fixed_seed_num"]
        wave_type = Parameters["wave_type"]
        is_add_noise = Parameters["is_add_noise"]
        is_multi_scale = Parameters["is_multi_scale"]
        noise_ratio = Parameters["noise_ratio"]
        pooling_type = Parameters["pooling_type"]
    except KeyError:
        pass


    result_list_dict = defaultdict(list)
    evaluation_list = ["ACCURACY","F1_SCORE","AUC","G_MEAN"]
    for each in evaluation_list:
        result_list_dict[each] = []



    for tab_cv in range(cross_cv):
        if tab_cv != corss_val_label: continue
        print("******************************"+str(tab_cv))
        #if corss_val_label == False:
            #if 'Nimda' in filename:
                #if not tab_cv == 1: continue
            #else:
            #if not tab_cv == 1 :continue#AS Leak, Code Red I, Slammer
        #else:
            #pass

        x_train, y_train,x_test, y_test = LoadData.GetData(pooling_type,is_add_noise,noise_ratio,'Attention',filepath, filename, sequence_window,tab_cv,cross_cv,Multi_Scale=is_multi_scale,Wave_Let_Scale=training_level,Wave_Type=wave_type)

        batch_size = min(len(y_train),len(y_test))
        #batch_size = Parameters["batch_size"]
        #x_train = x_train_multi_list
        #x_test = x_testing_multi_list

        #batch_size = 10
        if Label == "MS-LSTM":
            tf.reset_default_graph()
            tf.set_random_seed(fixed_seed_num)

            num_neurons = hidden_units
            # Network building
            if is_multi_scale and each_case == 2:

                number_scale_levels = training_level
                u_w_scales_normalized = tf.Variable(tf.constant(1.0/number_scale_levels,shape=[1,number_scale_levels]), name="u_w")
                u_w_scales_normalized = normalized_scale_levels(u_w_scales_normalized)
                u_w = tf.Variable(tf.random_normal(shape=[1,sequence_window]), name="u_w")


                data_original_train = tf.placeholder(tf.float32,[number_scale_levels,batch_size,sequence_window,input_dim])

                output_data_original_train = tf.Print(data_original_train,[data_original_train],"The Original Train  is :",first_n=4096,summarize=40)

                #data_original_train = tf.placeholder(tf.float32,[batch_size,sequence_window,input_dim])
                data_original_train2 = tf.transpose(data_original_train,[1,2,3,0])
                data_original_train_merged = batch_vm2(data_original_train2,tf.transpose(u_w_scales_normalized))
                data_original_train_merged = tf.reshape(data_original_train_merged,(batch_size,sequence_window,input_dim))
                lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(num_neurons, forget_bias=1.0, activation=tf.nn.tanh)

                val, state = tf.nn.dynamic_rnn(lstm_cell, data_original_train_merged, dtype=tf.float32)
                #val = tf.transpose(val,[1,0,2])
                val2 = tf.gather(val,val.get_shape()[0]-1)
                #val = tf.reshape(val,[batch_size*number_of_scales,num_neurons])
                out_put_val = tf.Print(val,[val.get_shape()],"The val shape is :",first_n=4096,summarize=40)
                out_put_val2 = tf.Print(val2,[val2.get_shape()],"The val2 shape is :",first_n=4096,summarize=40)

                Weight_W = tf.Variable(tf.truncated_normal([num_neurons,sequence_window]))
                out_put_Weight_W = tf.Print(Weight_W,[Weight_W],"The Weight_W is :",first_n=1024,summarize=10)

                b_W = tf.Variable(tf.constant(0.1, shape=[sequence_window,sequence_window]))
                out_put_b_W = tf.Print(b_W,[b_W.get_shape()],"The b_W shape is :",first_n=1024,summarize=10)

                #tf.reshape(tf.matmul(tf.reshape(Aijk,[i*j,k]),Bkl),[i,j,l])

                #u_current_levels_temp = tf.reshape(tf.mul(tf.reshape(val,[batch_size*num_neurons],Weight_W)+b_W
                #print("val shape is ")
                #print(val2.get_shape())
                #print(Weight_W.get_shape())
                #print(b_W.get_shape())
                u_current_levels_temp = tf.matmul(val2,Weight_W)+b_W

                out_put_u_current_levels_b_W = tf.Print(b_W,[b_W],"The b_W shape is :",first_n=4096,summarize=40)
                out_put_u_current_levels_temp = tf.Print(u_current_levels_temp,[u_current_levels_temp],"The u_current_levels_temp  is :",first_n=4096,summarize=40)
                out_put_u_current_u_w = tf.Print(u_w,[u_w],"The u_w shape is :",first_n=4096,summarize=40)

                u_current_levels_total = tf.gather(tf.cumsum(tf.exp(batch_vm(u_current_levels_temp,tf.transpose(u_w)))),sequence_window-1)
                #print(tf.transpose(u_w).get_shape())
                out_put_u_current_levels_total = tf.Print(u_current_levels_total,[u_current_levels_total],"The u_current_levels_total shape is :",first_n=4096,summarize=40)
                out_put_u_w_scale = tf.Print(u_w_scales_normalized,[u_w_scales_normalized],"The u_w_scales shape is ----------------:",first_n=4096,summarize=40)

                u_current_levels = tf.div(tf.exp(batch_vm(u_current_levels_temp,tf.transpose(u_w))),u_current_levels_total)
                out_put_u_current_levels = tf.Print(u_current_levels,[u_current_levels],"The u_current_levels shape is :",first_n=4096,summarize=40)

                target = tf.placeholder(tf.float32, [batch_size, number_class])
                #print("-----------------------------%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")
                #print(val.get_shape())


                m_total = batch_vm(tf.transpose(u_current_levels),val)

                #u_w_scales_normalized = u_current_levels
                #tf.assign(u_w_scales_normalized,u_current_levels)
                #m_total = tf.mul(tf.transpose(u_current_levels),val)

                #print(m_total.get_shape())

                out_put_m_total_shape = tf.Print(m_total,[m_total.get_shape()],"The m_total shape is :",first_n=4096,summarize=40)
                out_put_m_total = tf.Print(m_total,[m_total],"The m_total  is :",first_n=4096,summarize=40)

                weight = tf.Variable(tf.truncated_normal([num_neurons, int(target.get_shape()[1])]))
                bias = tf.Variable(tf.constant(0.1, shape=[target.get_shape()[1]]))
                prediction = tf.nn.softmax(tf.matmul(m_total, weight) + bias)

                out_put_prediction = tf.Print(prediction,[prediction.get_shape()],"The prediction shape is :",first_n=1024,summarize=10)
                #print(prediction.get_shape())

            else:
                try:
                    number_scale_levels = training_level
                    u_w_scales_normalized = tf.Variable(tf.constant(1.0/number_scale_levels,shape=[1,number_scale_levels]), name="u_w")
                    u_w_scales_normalized = normalized_scale_levels(u_w_scales_normalized)
                    u_w = tf.Variable(tf.random_normal(shape=[1,sequence_window]), name="u_w")


                    data_original_train = tf.placeholder(tf.float32,[number_scale_levels,batch_size,sequence_window,input_dim])

                    output_data_original_train = tf.Print(data_original_train,[data_original_train],"The Original Train  is :",first_n=4096,summarize=40)

                    #data_original_train = tf.placeholder(tf.float32,[batch_size,sequence_window,input_dim])
                    data_original_train2 = tf.transpose(data_original_train,[1,2,3,0])
                    data_original_train_merged = batch_vm2(data_original_train2,tf.transpose(u_w_scales_normalized))
                    data_original_train_merged = tf.reshape(data_original_train_merged,(batch_size,sequence_window,input_dim))
                    lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(num_neurons, forget_bias=1.0, activation=tf.nn.tanh)

                    val, state = tf.nn.dynamic_rnn(lstm_cell, data_original_train_merged, dtype=tf.float32)

                    target = tf.placeholder(tf.float32, [batch_size, number_class])






                except Exception:
                    # Fall back to a plain LSTM graph when the multi-scale
                    # placeholders cannot be built (e.g. training_level is None).
                    data_original_train = tf.placeholder(tf.float32, [None,sequence_window,input_dim])
                    target = tf.placeholder(tf.float32, [None, number_class])
                    lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(num_neurons, forget_bias=1.0, activation=tf.nn.tanh)
                    val, state = tf.nn.dynamic_rnn(lstm_cell, data_original_train, dtype=tf.float32)


                val = tf.transpose(val, [1, 0, 2])
                last = tf.gather(val, int(val.get_shape()[0]) - 1)

                weight = tf.Variable(tf.truncated_normal([num_neurons, int(target.get_shape()[1])]))
                bias = tf.Variable(tf.constant(0.1, shape=[target.get_shape()[1]]))

                prediction = tf.nn.softmax(tf.matmul(last, weight) + bias)


            #cost_cross_entropy = -tf.reduce_mean(target * tf.log(prediction))
            cost_cross_entropy = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=prediction, labels=target))  # sigmoid loss

            #optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
            optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
            minimize = optimizer.minimize(cost_cross_entropy)

            #mistakes = tf.not_equal(tf.argmax(target, 1), tf.argmax(prediction, 1))
            #error = tf.reduce_mean(tf.cast(mistakes, tf.float32))
            correct_pred = tf.equal(tf.argmax(prediction, 1), tf.argmax(target, 1))
            accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

            init_op = tf.global_variables_initializer()
            sess = tf.Session()

            sess.run(init_op)


            no_of_batches = int(len(y_train) / batch_size)
            epoch_training_loss_list = []
            epoch_training_acc_list = []
            epoch_val_loss_list = []
            epoch_val_acc_list = []
            weight_list=[]
            early_stopping = 100
            epoch_stop = epoch
            for i in range(epoch):
                if early_stopping <= 0:  # patience exhausted
                    epoch_stop = i + 1
                    break
                ptr = 0
                for j in range(no_of_batches):
                    inp, out = x_train[:,ptr:ptr + batch_size], y_train[ptr:ptr + batch_size]
                    inp2, out2 = x_test[:,ptr:ptr + batch_size], y_test[ptr:ptr + batch_size]
                    #print("INPUT IS ")
                    #print(inp)
                    #print("OUTPUT IS ")
                    #print(inp2)
                    #da.Plotting_Sequence(inp,out)
                    try:
                        #pass

                        sess.run(out_put_u_w_scale, {data_original_train: inp, target: out})
                        #sess.run(output__1,{data_original_train: inp, target: out})
                        #sess.run(output0,{data_original_train: inp, target: out})
                        #sess.run(output1,{data_original_train: inp, target: out})

                        #print("11111")
                        #print(out_put_u_w_scale)
                        #print("22222")
                        #print(normalized_scale_levels(out_put_u_w_scale))
                        #print(normalized_scale_levels(out_put_u_w_scale).shape)
                        #sess.run(tf.assign(u_w_scales,normalized_scale_levels(out_put_u_w_scale)))

                        #sess.run(out_put_original_train, {data_original_train: inp, target: out})
                        sess.run(out_put_val, {data_original_train: inp, target: out})
                        sess.run(out_put_val2, {data_original_train: inp, target: out})
                        #sess.run(out_put_Weight_W, {data_original_train: inp, target: out})
                        #sess.run(out_put_u_current_levels_temp, {data_original_train: inp, target: out})
                        #sess.run(out_put_u_current_u_w, {data_original_train: inp, target: out})
                        #sess.run(out_put_u_current_levels_b_W, {data_original_train: inp, target: out})

                        #sess.run(out_put_u_current_levels_total, {data_original_train: inp, target: out})
                        weight_list.append(sess.run(u_current_levels, {data_original_train: inp, target: out}))
                        #sess.run(out_put_m_total, {data_original_train: inp, target: out})
                        #sess.run(out_put_m_total_shape, {data_original_train: inp, target: out})

                        #sess.run(out_put_prediction, {data_original_train: inp, target: out})
                    except Exception:
                        # The attention tensors only exist in the multi-scale graph.
                        pass
                    #print(out)
                    ptr += batch_size
                    print(inp.shape)
                    sess.run(minimize, {data_original_train: inp,target: out})
                    training_acc,training_loss = sess.run((accuracy,cost_cross_entropy),{data_original_train: inp, target: out})
                        #sess.run(out_put_before_multi_first_level,{data_original_train: inp, target: out})
                        #sess.run(output_data_for_lstm_multi_scale,{data_original_train: inp, target: out})

                    epoch_training_loss_list.append(training_loss)
                    epoch_training_acc_list.append(training_acc)
                        #sess.run(out_put_before_multi_first_level,{data_original_train: inp, target: out})
                        #sess.run(out_put_before_multi_second_level,{data_original_train: inp, target: out})
                        #sess.run(out_put_before_multi_third_level,{data_original_train: inp, target: out})
                        #sess.run(out_put_after_multi_level,{data_original_train: inp, target: out})

                    #sess.run(minimize, {data_original_train: inp2,target: out2})

                    val_acc,val_loss = sess.run((accuracy,cost_cross_entropy),{data_original_train: inp2, target: out2})
                    epoch_val_loss_list.append(val_loss)
                    epoch_val_acc_list.append(val_acc)
                print("Epoch %s"%(str(i+1))+">"*20+"="+"train_accuracy: %s, train_loss: %s"%(str(training_acc),str(training_loss))\
                      +",\tval_accuracy: %s, val_loss: %s"%(str(val_acc),str(val_loss)))
                # Early stopping: compare against the previous validation accuracy.
                try:
                    max_val_acc = epoch_val_acc_list[-2]
                except IndexError:
                    max_val_acc = 0

                if epoch_val_acc_list[-1] < max_val_acc:
                    early_stopping -= 1
                else:
                    early_stopping = 100  # reset the patience counter
            #incorrect = sess.run(error, {data: x_test, target: y_test})
            #print("x_test shape is ..."+str(x_test.shape))
            #print(x_test)
            try:
                result = sess.run(prediction, {data_original_train: x_test, target: y_test})
            except Exception:
                # The attention graph is built for a fixed batch size;
                # truncate the test set to a single batch and retry.
                x_test = x_test[0:batch_size]
                y_test = y_test[0:batch_size]
                result = sess.run(prediction, {data_original_train: x_test, target: y_test})

            #print(result)
            #print("shape is ("+str(len(result))+","+str(len(result[0]))+')')
            #print('Epoch {:2d} error {:3.1f}%'.format(i + 1, 100 * incorrect))
            #if training_level > 0:
                #scale_weight = sess.run(scale_weight, {data_original_train: x_test, target: y_test})
                #print("The final scale weight is :\n")
                #print(scale_weight)
            #save_path = saver.save(sess, os.path.join(os.getcwd(),"modelckpt.txt"))
            #aaa = saver.restore(sess, os.path.join(os.getcwd(),"modelckpt.txt"))
            #all_variables = tf.trainable_variables()
            #var = [v for v in tf.trainable_variables() if v.name == "scale_weight"]
            sess.close()

        elif Label == "MS-LSTMB":
            pass

        results = Evaluation.Evaluation(y_test, result)#Computing ACCURACY, F1-Score, .., etc

        try:
            for each_eval, each_result in results.items():
                result_list_dict[each_eval].append(each_result)
            if len(Parameters) > 0:
                label = "PW"
            else:
                label = "DA"
        except Exception:
            # results is not a dict of metrics (the AUC plotting path).
            label = "AUC"

        if label == "DA":
            pass
            """
            y_test2 = np.array(Evaluation.ReverseEncoder(y_test))
            result2 = np.array(Evaluation.ReverseEncoder(result))
            with open("StatFalseAlarm_"+filename+"_True.txt","w") as fout:
                for tab in range(len(y_test2)):
                    fout.write(str(int(y_test2[tab]))+'\n')
            with open("StatFalseAlarm_"+filename+"_"+Label+"_"+"_Predict.txt","w") as fout:
                for tab in range(len(result2)):
                    fout.write(str(int(result2[tab]))+'\n')
            """
    try:
        for eachk, eachv in result_list_dict.items():
            result_list_dict[eachk] = np.average(eachv)
        print(result_list_dict)
        if not is_add_noise:
            if corss_val_label == 0:
                outputfilename = "Tab_A_MS-LSTM_Log_"+filename+".txt"

            else:
                outputfilename = "Tab_B_MS-LSTM_Log_"+filename+".txt"
            with open(os.path.join(os.getcwd(), outputfilename), "a") as fout:
                if training_level>0:
                    outfileline = Label+"_epoch:"+str(epoch_stop)+",__wavelet type:"+str(wave_type)+",__pooling type:"+str(pooling_type)+",__learning rate:"+str(learning_rate)+",__multi_scale:"+str(is_multi_scale)+",__scale_levels:"+str(training_level)+",__sequence_window:"+str(sequence_window)+"\n"
                else:
                    outfileline = Label+"_epoch:"+str(epoch_stop)+",__wavelet type:"+str(wave_type)+",__learning rate:"+str(learning_rate)+",__multi_scale:"+str(is_multi_scale)+",__scale_levels:"+str(training_level)+",__sequence_window:"+str(sequence_window)+"\n"

                fout.write(outfileline)
                for eachk,eachv in result_list_dict.items():
                    fout.write(eachk+": "+str(round(eachv,3))+",\t")
                fout.write('\n')
        else:
            with open(os.path.join(os.getcwd(), "MS-LSTM_Log_Adding_Noise_" + filename + ".txt"), "a")as fout:
                if training_level > 0:
                    outfileline = Label + "_____epoch:" + str(epoch_stop) +",_____pooling type:"+str(pooling_type)+ ",_____learning rate:" + \
                        str(learning_rate) + ",_____multi_scale:" + str(is_multi_scale) + "\n"
                else:
                    outfileline = Label + "_____epoch:" + str(epoch_stop) + ",_____pooling type:"+str(pooling_type)+ ",_____learning rate:" + \
                        str(learning_rate) + ",_____multi_scale:" + str(is_multi_scale) + ",_____train_set_using_level:" + str(training_level) + "\n"

                fout.write(outfileline)
                for eachk, eachv in result_list_dict.items():
                    fout.write(eachk + ": " + str(round(eachv, 3)) + ",\t")
                fout.write('\n')
    except Exception:
        pass
    #print("lallala")
    #print(epoch_training_loss_list)
    if not "DA"==label: return results
    return epoch_training_loss_list,epoch_val_loss_list,epoch_training_acc_list,epoch_val_acc_list,weight_list,results
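The Example #9 snippet calls helpers that are not shown: `batch_vm`/`batch_vm2` (batched vector-matrix products) and `normalized_scale_levels`. The inline hint `tf.reshape(tf.matmul(tf.reshape(Aijk,[i*j,k]),Bkl),[i,j,l])` suggests implementations along these lines (a hedged TF1-style reconstruction, not the author's originals):

def batch_vm(A, B):
    # Multiply along the last axis of A: [..., k] x [k, l] -> [..., l],
    # using the collapse-matmul-restore trick from the inline comment.
    k = int(A.get_shape()[-1])
    lead = tf.shape(A)[:-1]
    out = tf.matmul(tf.reshape(A, [-1, k]), B)
    return tf.reshape(out, tf.concat([lead, tf.shape(B)[1:]], axis=0))

batch_vm2 = batch_vm  # same trick, applied to the 4-D multi-scale tensor

def normalized_scale_levels(scale_weights):
    # Rescale the per-level attention weights so they sum to one.
    return scale_weights / tf.reduce_sum(scale_weights)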