def run_LSTM(X_train, X_valid, y_train, y_test, y_name, y_train_mean,
             y_train_std, save_folder):
    """Fit an LSTM decoder separately for each output column in ``y_name``.

    For each head item this fits a fresh LSTMDecoder on ``X_train``, reports
    train and test R2, saves predictions to ``save_folder`` as an .npz
    archive, and plots the fit.

    Returns the model fitted on the LAST head item (earlier models are
    discarded; only their predictions are written to disk).
    """
    # ### 4H. LSTM (Long Short Term Memory)
    # Python 2 print statements converted to print() calls for Python 3
    # compatibility (the file already mixes both styles).
    print('head items to fit are: ', y_name)
    for head_item in range(len(y_name)):

        # Extract one output column, kept 2-D as (n_samples, 1).
        y_train_item = y_train[:, head_item]
        y_train_item = np.reshape(y_train_item, [y_train.shape[0], 1])

        y_test_item = y_test[:, head_item]
        y_test_item = np.reshape(y_test_item, [y_test_item.shape[0], 1])
        print('********************************** Fitting Deep Net on %s Data **********************************'
              % y_name[head_item])
        # Declare model
        model_lstm = LSTMDecoder(dropout=0.25, num_epochs=5)

        # Pass normalization stats -- presumably for un-zscoring during the
        # loss calculation (see LSTMDecoder.get_means; TODO confirm).
        model_lstm.get_means(y_train_mean, y_train_std)

        # Fit model
        model_lstm.fit(X_train, y_train_item)

        # Get predictions on validation and training data
        y_valid_predicted_lstm = model_lstm.predict(X_valid)
        training_prediction = model_lstm.predict(X_train)

        R2s_training = get_R2(y_train_item, training_prediction)
        print('R2 on training set = ', R2s_training)

        # Get metric of fit on the held-out set
        R2s_lstm = get_R2(y_test_item, y_valid_predicted_lstm)
        print('R2s:', R2s_lstm)
        print('saving prediction ...')

        np.savez(save_folder + y_name[head_item] + '_LSTM_ypredicted.npz',
                 y_test=y_test_item,
                 y_prediction=y_valid_predicted_lstm,
                 y_train_=y_train_item,
                 training_prediction=training_prediction,
                 y_train_mean=y_train_mean[head_item],
                 y_train_std=y_train_std[head_item])
        print('plotting results...')
        plot_results(y_test_item,
                     y_valid_predicted_lstm,
                     y_name[head_item],
                     R2s_lstm,
                     model_name='LSTM',
                     save_folder=save_folder)

    return model_lstm
def ridgeCV_model(X_train, X_valid, y_train, y_test, y_name, y_train_mean,
                  y_train_std):
    """Fit a RidgeCV regression separately for each output column in y_name.

    Saves predictions (.npz) and the pickled model to the current working
    directory, plots each fit, and returns the model fitted on the LAST
    head item.
    """
    # Python 2 print statements converted to print() for Py3 compatibility.
    print('head items to fit are: ', y_name)
    for head_item in range(len(y_name)):

        # Extract one output column, kept 2-D as (n_samples, 1).
        y_train_item = y_train[:, head_item]
        y_train_item = np.reshape(y_train_item, [y_train.shape[0], 1])

        y_test_item = y_test[:, head_item]
        y_test_item = np.reshape(y_test_item, [y_test_item.shape[0], 1])
        print('********************************** Fitting RidgeCV on %s Data **********************************'
              % y_name[head_item])
        # Declare model.  NOTE(review): the `normalize` argument was
        # deprecated in scikit-learn 0.24 and removed in 1.2; on modern
        # sklearn wrap the estimator in a StandardScaler pipeline instead.
        model = linear_model.RidgeCV(alphas=[0.1, 1.0, 10.0],
                                     normalize=True,
                                     fit_intercept=True)

        # Fit model
        model.fit(X_train, y_train_item)

        # Get predictions on validation and training data
        y_valid_predicted = model.predict(X_valid)
        training_prediction = model.predict(X_train)

        R2s_training = get_R2(y_train_item, training_prediction)
        print('R2 on training set = ', R2s_training)

        # Get metric of fit on held-out data
        R2s = get_R2(y_test_item, y_valid_predicted)
        print('R2s:', R2s)
        print('saving prediction ...')
        np.savez(y_name[head_item] + '_RidgeCV_ypredicted.npz',
                 y_test=y_test_item,
                 y_prediction=y_valid_predicted,
                 y_train_=y_train_item,
                 training_prediction=training_prediction,
                 y_train_mean=y_train_mean[head_item],
                 y_train_std=y_train_std[head_item])
        joblib.dump(model, y_name[head_item] + '_Ridge.pkl')
        print('plotting results...')
        plot_results(y_test_item,
                     y_valid_predicted,
                     y_name[head_item],
                     R2s,
                     model_name='RidgeCV')

    return model
def WienerCascade(X_train, X_valid, y_train, y_test, y_name, y_train_mean,
                  y_train_std):
    """Fit a degree-3 Wiener cascade separately for each output column.

    Saves predictions (.npz) and the pickled model per head item, plots
    each fit, and returns the model fitted on the LAST head item.
    """
    # ### 4B. Wiener Cascade (Linear Nonlinear Model)
    # Python 2 print statements converted to print() for Py3 compatibility.
    print('head items to fit are: ', y_name)
    for head_item in range(len(y_name)):

        # Extract one output column, kept 2-D as (n_samples, 1).
        y_train_item = y_train[:, head_item]
        y_train_item = np.reshape(y_train_item, [y_train.shape[0], 1])

        y_test_item = y_test[:, head_item]
        y_test_item = np.reshape(y_test_item, [y_test_item.shape[0], 1])
        print('********************************** Fitting WienerCascade on %s Data **********************************'
              % y_name[head_item])
        # Declare model
        model = WienerCascadeDecoder(degree=3)

        # Fit model
        model.fit(X_train, y_train_item)

        # Get predictions on validation and training data
        y_valid_predicted = model.predict(X_valid)
        training_prediction = model.predict(X_train)

        R2s_training = get_R2(y_train_item, training_prediction)
        print('R2 on training set = ', R2s_training)

        # Get metric of fit on held-out data
        R2s = get_R2(y_test_item, y_valid_predicted)
        print('R2s:', R2s)
        print('saving prediction ...')
        np.savez(y_name[head_item] + '_WienerCascade_ypredicted.npz',
                 y_test=y_test_item,
                 y_prediction=y_valid_predicted,
                 y_train_=y_train_item,
                 training_prediction=training_prediction,
                 y_train_mean=y_train_mean[head_item],
                 y_train_std=y_train_std[head_item])
        joblib.dump(model, y_name[head_item] + '_WienerCascade.pkl')
        print('plotting results...')
        plot_results(y_test_item,
                     y_valid_predicted,
                     y_name[head_item],
                     R2s,
                     model_name='WienerCascade')

    return model
def BayesianRidge_model(X_train, X_valid, y_train, y_test, y_name,
                        y_train_mean, y_train_std):
    """Fit a BayesianRidge regression for each output column in y_name.

    Unlike the other helpers in this module, the target columns stay 1-D
    (sklearn's BayesianRidge expects y of shape (n_samples,)).
    Saves predictions and the pickled model per head item, plots each fit,
    and returns the model fitted on the LAST head item.
    """
    model_name = 'BayesianRidge'
    # Python 2 print statements converted to print() for Py3 compatibility.
    print('head items to fit are: ', y_name)
    for head_item in range(len(y_name)):

        # 1-D target columns -- deliberately NOT reshaped to (n, 1) here.
        y_train_item = y_train[:, head_item]
        y_test_item = y_test[:, head_item]
        print('********************************** Fitting %s on %s Data **********************************'
              % (model_name, y_name[head_item]))
        # Declare model (compute_score logs the marginal likelihood per
        # iteration on the fitted estimator).
        model = linear_model.BayesianRidge(compute_score=True)

        # Fit model
        model.fit(X_train, y_train_item)

        # Get predictions on validation and training data
        y_valid_predicted = model.predict(X_valid)
        training_prediction = model.predict(X_train)

        R2s_training = get_R2(y_train_item, training_prediction)
        print('R2 on training set = ', R2s_training)

        # Get metric of fit on held-out data
        R2s = get_R2(y_test_item, y_valid_predicted)
        print('R2s:', R2s)
        print('saving prediction ...')
        np.savez(y_name[head_item] + '_%s_ypredicted.npz' % model_name,
                 y_test=y_test_item,
                 y_prediction=y_valid_predicted,
                 y_train_=y_train_item,
                 training_prediction=training_prediction,
                 y_train_mean=y_train_mean[head_item],
                 y_train_std=y_train_std[head_item])
        joblib.dump(model, y_name[head_item] + '_%s.pkl' % model_name)
        print('plotting results...')
        plot_results(y_test_item,
                     y_valid_predicted,
                     y_name[head_item],
                     R2s,
                     model_name=model_name)

    return model
# Ejemplo n.º 5
# 0
 def LSTM_evaluate(units,dropout,num_epochs):
     units=int(units)
     num_epochs=int(num_epochs)
     model_LSTM=LSTMDecoder(units,dropout,num_epochs)
     model_LSTM.fit(X_train,y_train)
     y_valid_pred=model_LSTM.predict(X_valid)
     return np.mean(get_R2(y_valid,y_valid_pred))
def run_LSTM(X_train, X_valid, y_train, y_valid):
    """Fit a single LSTM decoder on the full multi-output y_train.

    NOTE(review): this redefines the earlier 8-argument run_LSTM in this
    module with a different signature -- only the later definition survives
    at import time.

    Saves predictions to 'lstm_ypredicted.npz', plots the fit, and returns
    the fitted LSTMDecoder.
    """
    # ### 4H. LSTM (Long Short Term Memory)
    # Python 2 print statements converted to print() for Py3 compatibility.

    # Declare model
    model_lstm = LSTMDecoder(units=400, dropout=0, num_epochs=5)

    # Fit model
    model_lstm.fit(X_train, y_train)

    # Get predictions
    y_valid_predicted_lstm = model_lstm.predict(X_valid)

    # Get metric of fit
    R2s_lstm = get_R2(y_valid, y_valid_predicted_lstm)
    print('R2s:', R2s_lstm)

    print('Results: ')
    print('y_valid_predicted_lstm = ', y_valid_predicted_lstm)
    print('y_valid = ', y_valid)

    np.savez('lstm_ypredicted.npz',
             y_valid_predicted_lstm=y_valid_predicted_lstm,
             y_valid=y_valid)

    plot_results(y_valid, y_valid_predicted_lstm)

    return model_lstm
def DNN(X_train, X_valid, y_train, y_test, y_name):
    # ### 4E. Dense Neural Network
    #
    # Fits one dense network per output column, saving and plotting each
    # fit; returns the network fitted on the last column.
    print('head items to fit are: ', y_name)
    for head_item in range(len(y_name)):
        # Pull out a single output column, kept 2-D as (n_samples, 1).
        target_train = np.reshape(y_train[:, head_item],
                                  [y_train.shape[0], 1])
        target_test = y_test[:, head_item]
        target_test = np.reshape(target_test, [target_test.shape[0], 1])

        print('********************************** Fitting DNN on %s Data **********************************' % y_name[head_item])

        # Train a three-layer dense decoder on this column.
        model_dnn = DenseNNDecoder(units=[128, 64, 32], num_epochs=15)
        model_dnn.fit(X_train, target_train)

        # Score the held-out predictions.
        predicted = model_dnn.predict(X_valid)
        r2 = get_R2(target_test, predicted)
        print('R2s:', r2)

        # Persist predictions for this head item.
        np.savez(y_name[head_item] + '_DNN_ypredicted.npz',
                 y_test=target_test,
                 y_prediction=predicted)

        print('plotting results...')
        plot_results(target_test, predicted, y_name[head_item], r2,
                     model_name='DNN')

    return model_dnn
def RNN(X_train, y_train, X_valid, y_valid, y_name):
    """Fit a simple RNN decoder on each output column in y_name.

    NOTE(review): a single SimpleRNNDecoder instance is declared once and
    re-fit for every head item, so later fits may continue from the weights
    of earlier ones -- confirm this is intended.

    Saves and plots each fit.  Returns the decoder after the last fit
    (the original returned None, inconsistent with its sibling functions).
    """
    model_name = 'RNN'
    # ### 4F. Simple RNN

    # Declare model (shared across head items -- see note above).
    model_rnn = SimpleRNNDecoder(units=400, dropout=0, num_epochs=100)

    for head_item in range(len(y_name)):
        # Fit one column at a time and save/plot the results.
        y_train_item = y_train[:, head_item]
        y_train_item = np.reshape(y_train_item, [y_train.shape[0], 1])

        y_valid_item = y_valid[:, head_item]
        y_valid_item = np.reshape(y_valid_item, [y_valid_item.shape[0], 1])

        model_rnn.fit(X_train, y_train_item)

        # Get predictions
        y_valid_predicted_rnn = model_rnn.predict(X_valid)

        # Get metric of fit
        R2s_rnn = get_R2(y_valid_item, y_valid_predicted_rnn)
        print(y_name[head_item], 'R2:', R2s_rnn)

        np.savez(y_name[head_item] + '_rnn_ypredicted.npz',
                 y_valid=y_valid_item,
                 y_valid_predicted_rnn=y_valid_predicted_rnn)

        plot_results(y_valid_item, y_valid_predicted_rnn, y_name[head_item],
                     R2s_rnn, model_name)

    return model_rnn
def SVR(X_flat_train, X_flat_valid, y_train, y_valid, y_name):
    """Fit an SVR decoder on each z-scored output column in y_name.

    The targets are divided by the training-set stdev (they were
    zero-centered upstream), so all saved predictions are in z-scored
    units.  Returns the decoder after the last fit (the original returned
    None, inconsistent with its sibling functions).
    """
    # ### 4D. SVR (Support Vector Regression)
    # Python 2 print statements converted to print() for Py3 compatibility.

    # The SVR works much better when the y values are normalized; they have
    # previously been zero-centered, so we just divide by the stdev (of the
    # training set).
    y_train_std = np.nanstd(y_train, axis=0)
    y_zscore_train = y_train / y_train_std
    y_zscore_valid = y_valid / y_train_std

    # Declare model (a single instance re-fit once per head item)
    model_svr = SVRDecoder(C=.1, max_iter=10000, gamma=1e-5)

    for head_item in range(len(y_name)):
        # Fit one column at a time and save/plot the results.
        print('########### Fitting SVR on %s data ###########'
              % y_name[head_item])

        y_zscore_train_item = y_zscore_train[:, head_item]
        y_zscore_train_item = np.reshape(y_zscore_train_item,
                                         [y_zscore_train.shape[0], 1])
        print('shape of y_zscore_train_item = ', y_zscore_train_item.shape)

        y_zscore_valid_item = y_zscore_valid[:, head_item]
        y_zscore_valid_item = np.reshape(y_zscore_valid_item,
                                         [y_zscore_valid_item.shape[0], 1])

        model_svr.fit(X_flat_train, y_zscore_train_item)

        # Get predictions
        y_zscore_valid_predicted_svr = model_svr.predict(X_flat_valid)

        # Get metric of fit
        R2s_svr = get_R2(y_zscore_valid_item, y_zscore_valid_predicted_svr)
        print(y_name[head_item], 'R2:', R2s_svr)

        np.savez(y_name[head_item] + '_svr_ypredicted.npz',
                 y_zscore_valid=y_zscore_valid_item,
                 y_zscore_valid_predicted_svr=y_zscore_valid_predicted_svr)

        plot_results(y_zscore_valid_item, y_zscore_valid_predicted_svr,
                     y_name[head_item], R2s_svr)

    return model_svr
def GRU():
    # ### 4G. GRU (Gated Recurrent Unit)
    #
    # Trains a GRU decoder and reports validation R2.
    # NOTE(review): X_train, y_train, X_valid and y_valid are read as
    # globals -- they must be defined before calling this.
    decoder = GRUDecoder(units=400, dropout=0, num_epochs=5)

    # Train on the training split, then score on validation.
    decoder.fit(X_train, y_train)
    predicted = decoder.predict(X_valid)
    print('R2s:', get_R2(y_valid, predicted))
def RNN():
    # ### 4F. Simple RNN
    #
    # Trains a simple RNN decoder and reports validation R2.
    # NOTE(review): X_train, y_train, X_valid and y_valid are read as
    # globals; this zero-argument def also shadows the earlier RNN(...).
    decoder = SimpleRNNDecoder(units=400, dropout=0, num_epochs=5)

    # Train on the training split, then score on validation.
    decoder.fit(X_train, y_train)
    predicted = decoder.predict(X_valid)
    print('R2s:', get_R2(y_valid, predicted))
def DNN():
    # ### 4E. Dense Neural Network
    #
    # Trains a dense network on the flattened inputs and reports validation
    # R2.  NOTE(review): X_flat_train, X_flat_valid, y_train and y_valid
    # are read as globals; this zero-argument def shadows the earlier DNN(...).
    decoder = DenseNNDecoder(units=400, dropout=0.25, num_epochs=10)

    # Train on the training split, then score on validation.
    decoder.fit(X_flat_train, y_train)
    predicted = decoder.predict(X_flat_valid)
    print('R2s:', get_R2(y_valid, predicted))
def XGBoost():
    # ### 4C. XGBoost (Extreme Gradient Boosting)
    #
    # Trains an XGBoost decoder on the flattened inputs and reports
    # validation R2.  NOTE(review): data arrays are read as globals;
    # gpu=-1 presumably selects CPU-only training -- confirm with
    # XGBoostDecoder's documentation.
    decoder = XGBoostDecoder(max_depth=3, num_round=200, eta=0.3, gpu=-1)

    # Train on the training split, then score on validation.
    decoder.fit(X_flat_train, y_train)
    predicted = decoder.predict(X_flat_valid)
    print('R2s:', get_R2(y_valid, predicted))
def WienerCascade():
    # ### 4B. Wiener Cascade (Linear Nonlinear Model)
    #
    # Fits a degree-3 Wiener cascade on the flattened inputs and reports
    # validation R2.  NOTE(review): data arrays are read as globals; this
    # zero-argument def shadows the earlier WienerCascade(...).
    cascade = WienerCascadeDecoder(degree=3)

    # Train on the training split, then score on validation.
    cascade.fit(X_flat_train, y_train)
    predicted = cascade.predict(X_flat_valid)
    print('R2s:', get_R2(y_valid, predicted))
def Wiener(X_flat_train, X_flat_valid, y_train, y_valid):
    """Fit a Wiener-filter decoder and print its validation R2.

    Returns the fitted WienerFilterDecoder.
    """
    decoder = WienerFilterDecoder()
    decoder.fit(X_flat_train, y_train)

    # Score on the held-out validation data.
    predicted = decoder.predict(X_flat_valid)
    print('R2s:', get_R2(y_valid, predicted))

    #plot_results(y_valid,y_valid_predicted_wf)

    return decoder
def SVR():
    # ### 4D. SVR (Support Vector Regression)
    #
    # NOTE(review): reads y_train, y_test, y_valid, X_flat_train and
    # X_flat_valid as globals; this zero-argument def shadows the earlier
    # SVR(...).
    #
    # SVR works much better when the targets are normalized; they were
    # zero-centered earlier, so dividing by the training stdev completes
    # the z-scoring.
    train_std = np.nanstd(y_train, axis=0)
    y_zscore_train = y_train / train_std
    y_zscore_test = y_test / train_std
    y_zscore_valid = y_valid / train_std

    # Fit the decoder on z-scored training targets.
    decoder = SVRDecoder(C=5, max_iter=4000)
    decoder.fit(X_flat_train, y_zscore_train)

    # Score on the (z-scored) validation split.
    z_predicted = decoder.predict(X_flat_valid)
    print('R2s:', get_R2(y_zscore_valid, z_predicted))
# Ejemplo n.º 17
# 0
def AllDecoders(X, y):
    """Tune LSTM and GRU decoders with Bayesian optimization and evaluate.

    X is a 3-D array (samples, time, features); y is 2-D
    (samples, outputs).  The data are split into train/valid/test by fixed
    fractions, X is z-scored and y zero-centered.  NOTE(review): the
    mean/std statistics are computed over ALL of X and y, not just the
    training split -- this leaks test statistics; confirm that is intended.

    For each decoder, BayesianOptimization searches over
    units/dropout/num_epochs maximizing mean validation R2, then the model
    is refit with the best parameters.

    Returns (train_R2, valid_R2, test_R2, y_train_pred, y_valid_pred,
    y_test_pred, y_train, y_valid, y_test, BestParams); list entries are
    ordered [LSTM, GRU].  The RNN branch was disabled in the original
    code, so models[0] never contributes.
    """
    train_R2 = []
    test_R2 = []
    valid_R2 = []
    y_train_pred = []
    y_test_pred = []
    y_valid_pred = []
    BestParams = {}
    models = ['RNN', 'LSTM', 'GRU']

    # Hyperparameter search space shared by all decoders.
    params = [{'units': (50, 600), 'dropout': (0, 0.6), 'num_epochs': (2, 31)}]
    initpoints = 10
    niter = 10
    k = 10

    # Fixed-fraction splits: train=[0.2,1.0), valid=[0,0.1), test=[0.1,0.2).
    training_range = [.2, 1]
    valid_range = [0, .1]
    testing_range = [.1, .2]
    num_examples = X.shape[0]

    def _index_range(frac_range):
        # Convert a [start, end) fraction pair into integer sample indices.
        # int() replaces np.int, which was removed in NumPy 1.24.
        return np.arange(int(np.round(frac_range[0] * num_examples)),
                         int(np.round(frac_range[1] * num_examples)))

    training_set = _index_range(training_range)
    valid_set = _index_range(valid_range)
    testing_set = _index_range(testing_range)

    # Slice out the three subsets.
    X_train = X[training_set, :, :]
    y_train = y[training_set, :]
    X_test = X[testing_set, :, :]
    y_test = y[testing_set, :]
    X_valid = X[valid_set, :, :]
    y_valid = y[valid_set, :]

    # Z-score inputs (statistics over the FULL X, as in the original).
    X_mean = np.nanmean(X, axis=0)
    X_std = np.nanstd(X, axis=0)
    X_train = np.nan_to_num((X_train - X_mean) / X_std)
    X_valid = np.nan_to_num((X_valid - X_mean) / X_std)
    X_test = np.nan_to_num((X_test - X_mean) / X_std)

    # Zero-center outputs (mean over the FULL y, as in the original).
    y_mean = np.mean(y, axis=0)
    y_train = y_train - y_mean
    y_test = y_test - y_mean
    y_valid = y_valid - y_mean

    def _tune_and_evaluate(decoder_cls, model_key, verbose):
        # Tune one decoder: maximize mean validation R2 over the search
        # space, refit with the best (integer-cast) parameters, and record
        # predictions/R2 on all three splits.
        def evaluate(units, dropout, num_epochs):
            # The optimizer supplies floats; truncate discrete parameters.
            model = decoder_cls(int(units), dropout, int(num_epochs))
            model.fit(X_train, y_train)
            return np.mean(get_R2(y_valid, model.predict(X_valid)))

        bo = BayesianOptimization(evaluate, params[0], verbose=verbose)
        bo.maximize(init_points=initpoints, n_iter=niter, kappa=k)
        best_params = bo.max['params']
        best_params['units'] = int(best_params['units'])
        best_params['num_epochs'] = int(best_params['num_epochs'])

        # Refit with the best hyperparameters and predict all splits.
        model = decoder_cls(best_params['units'], best_params['dropout'],
                            best_params['num_epochs'])
        model.fit(X_train, y_train)
        y_train_pred_temp = model.predict(X_train)
        y_valid_pred_temp = model.predict(X_valid)
        y_test_pred_temp = model.predict(X_test)
        y_train_pred.append(y_train_pred_temp)
        y_valid_pred.append(y_valid_pred_temp)
        y_test_pred.append(y_test_pred_temp)
        train_R2.append(np.mean(get_R2(y_train, y_train_pred_temp)))
        valid_R2.append(np.mean(get_R2(y_valid, y_valid_pred_temp)))
        test_R2.append(np.mean(get_R2(y_test, y_test_pred_temp)))
        BestParams[model_key] = best_params

    # RNN tuning was commented out in the original; only LSTM and GRU run.
    # The verbose levels (1 for LSTM, 0 for GRU) match the original calls.
    _tune_and_evaluate(LSTMDecoder, models[1], verbose=1)
    _tune_and_evaluate(GRUDecoder, models[2], verbose=0)

    return train_R2, valid_R2, test_R2, y_train_pred, y_valid_pred, y_test_pred, y_train, y_valid, y_test, BestParams
        #Get training data
        X_train = X[train, :, :]
        y_train = y[train, :]

        #Get testing data
        X_test = X[test, :, :]
        y_test = y[test, :]

        #Fit model
        model_rnn.fit(X_train, y_train)

        #Get predictions
        y_test_predicted_rnn = model_rnn.predict(X_test)

        #Get metric of fit
        R2s_tmp[i, j, :] = get_R2(y_test, y_test_predicted_rnn)
        print('R2s:', R2s_tmp[i, j, :])

# NOTE(review): top-level plotting fragment -- relies on plt, R2s_tmp,
# fractions_of_data and dt_ratio being defined earlier (not visible in this
# chunk).  Plots the mean +/- std (across repeats) of the R2 of output
# column 1 against the inverse fraction of training data used.
plt.errorbar(fractions_of_data**-1, np.mean(R2s_tmp[:, :, 1], axis=1),
             np.std(R2s_tmp[:, :, 1], axis=1))

plt.savefig("perf_over_data_dt" + str(60 * dt_ratio) + ".eps")

##Declare model
#model_lstm=LSTMDecoder(units=400,dropout=0,num_epochs=5)
#
##Fit model
#model_lstm.fit(X_train,y_train)
#
##Get predictions
#y_valid_predicted_lstm = model_lstm.predict(X_valid)