Example #1
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Activation
from tensorflow.keras.layers import Embedding
from tensorflow.keras.layers import LSTM
from tensorflow.keras.layers import Bidirectional, TimeDistributed
from tensorflow.keras.callbacks import EarlyStopping

model = Sequential()
model.add(
    Embedding(output_dim=output_length,
              input_dim=token_num,
              input_length=data_length))
model.add(Dropout(dropout))

model.add(Bidirectional(LSTM(lstm_dim), merge_mode='sum'))
model.add(Dropout(dropout))

model.add(Dense(units=256, activation='relu'))
model.add(Dropout(dropout))

model.add(Dense(units=1, activation='sigmoid'))

model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

es = EarlyStopping(monitor='val_loss', patience=5, verbose=2)

train_history = model.fit(x=x_train,
                          y=y_train_data,
Example #2
def build_model():
    """
    Description:
        Building DCBLSTM model
    Args:
        None
    Returns:
        None

    """

    #main input is the protein's amino acid sequence of length 700 -> shape (700,)
    main_input = Input(shape=(700, ), dtype='float32', name='main_input')

    #Embedding Layer used as input to the neural network
    embed = Embedding(output_dim=21, input_dim=21,
                      input_length=700)(main_input)

    #secondary input is the protein profile features
    auxiliary_input = Input(shape=(700, 21), name='aux_input')

    #get shape of input layers
    print("Protein Sequence shape: ", main_input.get_shape())
    print("Protein Profile shape: ", auxiliary_input.get_shape())

    #concatenate 2 input layers
    concat = Concatenate(axis=-1)([embed, auxiliary_input])

    ######## 3x1D-Convolutional Layers with BatchNormalization, Dropout and MaxPooling ########

    conv_layer1 = Conv1D(16, 7, kernel_regularizer="l2",
                         padding='same')(concat)
    batch_norm = BatchNormalization()(conv_layer1)
    conv_act = activations.relu(batch_norm)
    conv_dropout = Dropout(0.2)(conv_act)
    max_pool_1D_1 = MaxPooling1D(pool_size=2, strides=1,
                                 padding='same')(conv_dropout)

    conv_layer2 = Conv1D(32, 7, padding='same')(concat)
    batch_norm = BatchNormalization()(conv_layer2)
    conv_act = activations.relu(batch_norm)
    conv_dropout = Dropout(0.2)(conv_act)
    max_pool_1D_2 = MaxPooling1D(pool_size=2, strides=1,
                                 padding='same')(conv_dropout)

    conv_layer3 = Conv1D(64, 7, kernel_regularizer="l2",
                         padding='same')(concat)
    batch_norm = BatchNormalization()(conv_layer3)
    conv_act = activations.relu(batch_norm)
    conv_dropout = Dropout(0.2)(conv_act)
    max_pool_1D_3 = MaxPooling1D(pool_size=2, strides=1,
                                 padding='same')(conv_dropout)

    ##maybe try removing dropout after batchnorm - batchnorm acts as a form of regularisation, reducing the need for dropout
    ############################################################################################

    #concatenate convolutional layers
    conv_features = Concatenate(axis=-1)(
        [max_pool_1D_1, max_pool_1D_2, max_pool_1D_3])
    # conv_features = Concatenate(axis=-1)([conv1_dropout, conv2_dropout, conv3_dropout])

    #dense layer before LSTM's
    lstm_dense = Dense(600, activation='relu',
                       name="after_cnn_dense")(conv_features)

    ######## Recurrent Bi-Directional Long-Short-Term-Memory Layers ########
    lstm_f1 = Bidirectional(
        LSTM(200,
             return_sequences=True,
             activation='tanh',
             recurrent_activation='sigmoid',
             dropout=0.5,
             recurrent_dropout=0.5))(lstm_dense)

    lstm_f2 = Bidirectional(
        LSTM(200,
             return_sequences=True,
             activation='tanh',
             recurrent_activation='sigmoid',
             dropout=0.5,
             recurrent_dropout=0.5))(lstm_f1)

    ############################################################################################

    #concatenate LSTM with convolutional layers
    concat_features = Concatenate(axis=-1)([lstm_f1, lstm_f2, lstm_dense])
    concat_features = Dropout(0.4)(concat_features)

    #Dense Fully-Connected DNN layers
    after_lstm_dense = Dense(600, activation='relu')(concat_features)
    after_lstm_dense_dropout = Dropout(0.3)(after_lstm_dense)

    #Final Dense layer with 8 nodes for the 8 output classifications
    main_output = Dense(8, activation='softmax',
                        name='main_output')(after_lstm_dense_dropout)

    #create model from inputs and outputs
    model = Model(inputs=[main_input, auxiliary_input], outputs=[main_output])

    #use Adam optimizer
    adam = Adam(lr=0.00015)

    #compile model using adam optimizer and the categorical crossentropy loss function
    model.compile(optimizer=adam,
                  loss={'main_output': 'categorical_crossentropy'},
                  metrics=[
                      'accuracy',
                      MeanSquaredError(),
                      FalseNegatives(),
                      FalsePositives(),
                      TrueNegatives(),
                      TruePositives(),
                      MeanAbsoluteError(),
                      Recall(),
                      Precision(),
                      AUC()
                  ])

    #print model summary
    model.summary()

    return model
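A minimal usage sketch for the builder above, assuming integer-encoded amino-acid sequences of shape (N, 700), profile features of shape (N, 700, 21) and one-hot Q8 labels of shape (N, 700, 8); the dummy arrays, batch/epoch settings, and variable names are placeholders, not part of the original example, and the Keras imports the function relies on are assumed to already be in scope.

import numpy as np

# hypothetical dummy data standing in for a real protein dataset
n_samples = 32
seqs = np.random.randint(0, 21, size=(n_samples, 700)).astype('float32')   # residue indices
profiles = np.random.rand(n_samples, 700, 21).astype('float32')            # profile (PSSM) features
labels = np.eye(8)[np.random.randint(0, 8, size=(n_samples, 700))]         # one-hot Q8 targets

model = build_model()
model.fit({'main_input': seqs, 'aux_input': profiles},
          {'main_output': labels},
          epochs=1, batch_size=8)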
Example #3
poluicao = poluicao.reshape(-1,1)
poluicao_normalizada = normalizador.fit_transform(poluicao)

previsores = []
poluicao_real = []

for i in range(10, len(base)):
    previsores.append(base_treinamento_normalizada[i - 10:i, 0:6])
    poluicao_real.append(base_treinamento_normalizada[i, 0])

previsores, poluicao_real = np.array(previsores), np.array(poluicao_real)

regressor = Sequential()

regressor.add(LSTM(units = 100, return_sequences = True, input_shape = (previsores.shape[1], previsores.shape[2])))
regressor.add(Dropout(0.3))

regressor.add(LSTM(units = 50, return_sequences = True))
regressor.add(Dropout(0.2))

regressor.add(LSTM(units = 50, return_sequences = True))
regressor.add(Dropout(0.2))

regressor.add(LSTM(units = 50))
regressor.add(Dropout(0.2))

regressor.add(Dense(units = 1, activation = "linear"))

regressor.compile(optimizer = "rmsprop", loss = "mean_squared_error",
                  metrics = ["mean_absolute_error"])
    scaler = MinMaxScaler()
    scaler.fit(train_df[TARGET].values.reshape(-1, 1))
    scaled_train_data = scaler.transform(train_df[TARGET].values.reshape(-1, 1))
    scaled_test_data = scaler.transform(test_df[TARGET].values.reshape(-1, 1))

    input_length = 15
    num_features = 1
    generator = TimeseriesGenerator(scaled_train_data, scaled_train_data, length=input_length, batch_size=1)

    cases_so_far = df[-1:].cumCasesByPublishDate.values[0]
    last_date = df[-1:].index.values[0]
    print(cases_so_far, last_date)

    lstm_model = Sequential()
    lstm_model.add(LSTM(100, activation='relu', input_shape=(input_length, num_features)))
    lstm_model.add(Dense(1))
    lstm_model.compile(optimizer='adam', loss='mse')
    lstm_model.summary()

    lstm_model.fit_generator(generator, epochs=20)

    losses_lstm = lstm_model.history.history['loss']

    lstm_predictions_scaled = list()
    batch = scaled_train_data[-input_length:]
    current_batch = batch.reshape((1, input_length, num_features))

    for i in range(len(test_df)):
        lstm_pred = lstm_model.predict(current_batch)[0]
        lstm_predictions_scaled.append(lstm_pred)
#%%
train_x, train_y = data[:, :-1], data[:, -1]
train_x
#%%
train_x = train_x.reshape((train_x.shape[0], n_seq, n_steps, 1))
train_x
#%%
# define model
model = Sequential()
model.add(TimeDistributed(Conv1D(filters=n_filters, kernel_size=n_kernel,
    activation='relu', input_shape=(None,n_steps,1))))
model.add(TimeDistributed(Conv1D(filters=n_filters, kernel_size=n_kernel,
    activation='relu')))
model.add(TimeDistributed(MaxPooling1D(pool_size=2)))
model.add(TimeDistributed(Flatten()))
model.add(LSTM(n_nodes, activation='relu'))
model.add(Dense(n_nodes, activation='relu'))
model.add(Dense(1))
model.compile(loss='mse', optimizer='adam')
# fit
model.fit(train_x, train_y, epochs=n_epochs, batch_size=n_batch, verbose=0)
#%%
# For one round:
history = [x for x in train]
predictions = list()
# step over each time-step in the test set
for i in range(len(test)):
    # fit model and make forecast for history
    yhat = model_predict(model, history, config)

    # store forecast in list of predictions
def __init__(self):
    super(XrsInputModel, self).__init__(name="XrsInputModel")
    self.layerNorm = LayerNormalization(axis=-2, input_shape=(-1, 10, 30))
    self.lstm = Bidirectional(LSTM(256, return_sequences=True, return_state=True))
    self.attention = bahdanau(128)
    self.attention_weights = None
max_fatures = 2000
tokenizer = Tokenizer(num_words=max_fatures, split=' ')
tokenizer.fit_on_texts(data['text'].values)
X = tokenizer.texts_to_sequences(data['text'].values)
X = pad_sequences(X)
Y = pd.get_dummies(data['sentiment']).values



# model building/model definitions and training
embed_dim = 128
lstm_out = 196
batch_size = 32
epochs = 7
with graph.as_default():
    model = Sequential()
    model.add(Embedding(max_fatures, embed_dim,input_length = X.shape[1]))
    model.add(SpatialDropout1D(0.4))
    model.add(LSTM(lstm_out, dropout=0.2, recurrent_dropout=0.2))
    model.add(Dense(3,activation='softmax'))
    model.compile(loss = 'categorical_crossentropy', optimizer='adam',metrics = ['accuracy'])
    model.fit(X, Y, epochs = epochs, batch_size=batch_size)
# saving the model
    model.save_weights('model.h5')
with open("model.json","w") as f:
    f.write(model.to_json())
pickle.dump(tokenizer,open("tokenizer.pkl","wb"))
pickle.dump(X.shape[1],open("max_text_length.pkl","wb"))
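A hedged sketch of reloading the artifacts saved above (model.json, model.h5, tokenizer.pkl, max_text_length.pkl) for inference; the helper name and example text are placeholders, not part of the original snippet.

import pickle
from tensorflow.keras.models import model_from_json
from tensorflow.keras.preprocessing.sequence import pad_sequences

# restore architecture, weights, tokenizer and the padding length saved above
with open("model.json") as f:
    loaded_model = model_from_json(f.read())
loaded_model.load_weights("model.h5")
with open("tokenizer.pkl", "rb") as f:
    tokenizer = pickle.load(f)
with open("max_text_length.pkl", "rb") as f:
    max_text_length = pickle.load(f)

def predict_sentiment(text):
    seq = tokenizer.texts_to_sequences([text])
    padded = pad_sequences(seq, maxlen=max_text_length)
    return loaded_model.predict(padded)   # probabilities over the 3 sentiment classes

print(predict_sentiment("what a great movie"))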

    
def build_model(hp):
    num_layers = hp.Int('num_layers', 2, 8, default=6)

    # Define training condition and flags
    stnd_dev = Input(shape=(1,))
    mean = Input(shape=(1,))

    # Configuring Convolution Neural Network by functional API
    model1_input = Input(shape=x_train.shape[1:])
    filters = hp.Int('filters_0', 32, 256, step=32, default=64)

    model1 = Conv2D(filters, (3, 3), padding='same')(model1_input)
    model1 = Activation(leaky_relu)(model1)
    model1 = MaxPooling2D(pool_size=(2, 2))(model1)
    model1 = Dropout(0.25)(model1)

    for idx in range(1, num_layers-1):
        idx = str(idx)
        filters = hp.Int('filters_' + idx, 32, 256, step=32, default=64)

        model1 = Conv2D(filters, (3, 3), padding='same')(model1)
        model1 = Activation(leaky_relu)(model1)
        model1 = MaxPooling2D(pool_size=(2, 2))(model1)
        model1 = Dropout(0.25)(model1)

    filters = hp.Int('filters_' + str(num_layers-1), 32, 256, step=32, default=64)

    model1 = Conv2D(filters, (3, 3), padding='same')(model1)
    model1 = Activation(leaky_relu)(model1)
    model1 = MaxPooling2D(pool_size=(2, 2))(model1)
    model1 = Dropout(0.5)(model1)

    model1 = Flatten()(model1)

    model2_input = keras.layers.concatenate([stnd_dev, mean])

    emb_len = hp.Int('Output_dim', 32, 512, step=32, default=128)

    model2 = Embedding(max_features, emb_len, input_length=maxlen)(model2_input)

    num_layers_L = hp.Int('num_layers_L', 2, 8, default=6)
    for idx in range(num_layers_L):
        idx = str(idx)
        units = hp.Int('units_' + idx, 32, 512, step=32, default=64)

        # Configuring Feed-Forward Neural Network by functional API
        model2 = Bidirectional(LSTM(units, return_sequences=True))(model2)
        model2 = Dropout(0.25)(model2)

    units = hp.Int('units_' + str(num_layers_L), 32, 512, step=32, default=64)

    model2 = Bidirectional(LSTM(units))(model2)
    model2 = Dropout(0.5)(model2)

    print('Build model...')

    # Concatenate CNN and FFNN
    merged_vector = keras.layers.concatenate([model2, model1])

    output = Dense(num_classes, activation='softmax')(merged_vector)

    model = Model(inputs=[stnd_dev, mean, model1_input], outputs=output)

    optimizer_name = hp.Choice(
        'optimizer', ['adam', 'rmsprop', 'sgd'], default='adam')
    optimizer = keras.optimizers.get(optimizer_name)

    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizer,
                  metrics=['accuracy'])
    return model
Example #9


# print(x_train.shape)


x_predict = x_train[30:40]
y_answer = y_train[30:40]


#2. Build the model
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, LSTM, Dropout, Conv2D, MaxPooling2D, Flatten #LSTM is also a layer

model = Sequential()
model.add(LSTM(32, activation='relu', input_shape=(x_train.shape[1], 1))) 
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(128, activation='relu'))
model.add(Dense(512, activation='relu'))
model.add(Dense(200, activation='relu'))
model.add(Dense(50, activation='relu'))
model.add(Dense(10, activation='relu'))
model.add(Dense(1, activation='sigmoid'))



#3. Compile and train
from tensorflow.keras.callbacks import EarlyStopping
early_stopping = EarlyStopping(monitor='loss', patience=100, mode='auto')
Example #10
userModel = Concatenate(axis=1)([usersGenderModel, usersAgeModel, usersJobIdModel])
# userModel = Flatten()(userModel)
userDense1 = Dense(16, activation='relu', kernel_regularizer = regularizers.l2(0.001))(userModel)

# ------------ movie part
moviesGenresInput = Input(shape=(moviesGenresInputDim, ), dtype="float32", name='movieGenres')
moviesGenresModel = Dense(16, activation='relu', use_bias=True,kernel_regularizer=regularizers.l2(0.001))(moviesGenresInput)
moviesGenresModel = Dropout(rate=dropoutRate)(moviesGenresModel)
moviesGenresModel = Dense(16, activation='relu', use_bias=True,)(moviesGenresModel)
moviesGenresModel = BatchNormalization(epsilon=0.001, momentum=0.99, axis=-1)(moviesGenresModel)
moviesGenresModel = Reshape((1,16))(moviesGenresModel)

moviesTitleInput = Input(shape=(15,), dtype="int32", name='movieTitle')
moviesTitleModel = Embedding(moviesTitleInputDim+1, 32, input_length=15)(moviesTitleInput)
moviesTitleModel = LSTM(16, activation='relu')(moviesTitleModel)
moviesTitleModel = Dense(16, activation='relu', use_bias=True, kernel_regularizer=regularizers.l2(0.001))(moviesTitleModel)
moviesTitleModel = BatchNormalization(epsilon=0.001, momentum=0.99, axis=-1)(moviesTitleModel)
moviesTitleModel = Reshape((1,16))(moviesTitleModel)

movieModel = Concatenate(axis=1)([moviesTitleModel, moviesGenresModel, ])
movieModel = Dense(16, activation='relu', kernel_regularizer = regularizers.l2(0.001))(movieModel)

# -----------combine
combineModel = Concatenate(axis=1)([userDense1, movieModel])
combineModel = Flatten()(combineModel)
combineModel = Dense(32, activation='relu', use_bias=True, kernel_regularizer=regularizers.l2(0.001))(combineModel)
combineModel = Dense(16, activation='relu', use_bias=True, kernel_regularizer=regularizers.l2(0.00))(combineModel)
combineModel = Dense(8, activation='relu', use_bias=True, kernel_regularizer=regularizers.l2(0.001))(combineModel)
combineModel = Dense(4, activation='relu', use_bias=True, kernel_regularizer=regularizers.l2(0.00))(combineModel)
combineModel = Dense(1, activation='relu')(combineModel)
Example #11
scaler.fit(x_train)
x_train = scaler.transform(x_train)
x_test = scaler.transform(x_test)
x_val = scaler.transform(x_val)

x_train = x_train.reshape(x_train.shape[0], x_train.shape[1], 1)
x_test = x_test.reshape(x_test.shape[0], x_test.shape[1], 1)
x_val = x_val.reshape(x_val.shape[0], x_val.shape[1], 1)

#2. Build the model

from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, LSTM

model = Sequential()
model.add(LSTM(300, activation='relu', input_shape=(13, 1)))
model.add(Dense(200, activation='relu'))
model.add(Dense(100, activation='relu'))
model.add(Dense(100, activation='relu'))
model.add(Dense(100, activation='relu'))
model.add(Dense(100, activation='relu'))
model.add(Dense(100, activation='relu'))
model.add(Dense(100, activation='relu'))
model.add(Dense(3, activation='softmax'))

# 3. Compile and train
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics='acc')

from tensorflow.keras.callbacks import EarlyStopping
early_stopping = EarlyStopping(monitor='acc', patience=20, mode='max')
Example #12
corpus = np.array(corpus)
corpus = corpus.reshape(len(x['title']),20)
#converting text data into numeric form
voc_size=10000
# onehot_rep = [one_hot(word,voc_size)  for word in corpus]
#
#
sentlen =20
# embedding_doc = pad_sequences(onehot_rep,padding='pre',maxlen=sentlen)


#Actual model implementation
embedding_feature = 60
model = Sequential()
model.add(Embedding(voc_size,embedding_feature,input_length=sentlen))
model.add(LSTM(64,return_sequences=True))
model.add(tf.keras.layers.BatchNormalization())
model.add(tf.keras.layers.Dropout(0.5))
model.add(LSTM(32))
model.add(tf.keras.layers.BatchNormalization())
model.add(tf.keras.layers.Dropout(0.5))
model.add(Dense(1,activation='relu'))
model.add(Dense(1,activation='sigmoid'))
model.compile(loss='binary_crossentropy',optimizer='adam',metrics=['accuracy'])

x_final = np.array(corpus)

#splitting data
from sklearn.model_selection import train_test_split
x_temp, x_test, y_temp, y_test = train_test_split(x_final,y,test_size=0.2,random_state=1)
x_train,x_val,y_train,y_val = train_test_split(x_temp,y_temp,test_size=0.25,random_state=101)
Example #13
K.clear_session()

latent_dim = 300
embedding_dim = 100

# Encoder
encoder_inputs = Input(shape=(max_text_len, ))

# embedding layer
enc_emb = Embedding(x_voc, embedding_dim, trainable=True)(encoder_inputs)

# encoder lstm 1
encoder_lstm1 = LSTM(latent_dim,
                     return_sequences=True,
                     return_state=True,
                     dropout=0.4,
                     recurrent_dropout=0.4)
encoder_output1, state_h1, state_c1 = encoder_lstm1(enc_emb)

# encoder lstm 2
encoder_lstm2 = LSTM(latent_dim,
                     return_sequences=True,
                     return_state=True,
                     dropout=0.4,
                     recurrent_dropout=0.4)
encoder_output2, state_h2, state_c2 = encoder_lstm2(encoder_output1)

# encoder lstm 3
encoder_lstm3 = LSTM(latent_dim,
                     return_state=True,
def generic_multiple_series_lookback(
        input_csv_file,
        sel_features,
        csv_sep=",",
        train_split=0.67,
        look_back=1,
        lstm_units=4,
        batch_size=1,
        epochs=100,
        verbose=2,
        plot_prefix="",
        na_values=-1,
        ncols=2,
        force_retrain=True

):
    # Cuda setup
    physical_devices = tf.config.list_physical_devices('GPU')
    tf.config.experimental.set_memory_growth(physical_devices[0], enable=True)

    # fix random seed for reproducibility
    np.random.seed(7)

    # load the dataset
    dataframe = pd.read_csv(input_csv_file, sep=csv_sep, na_values=na_values)
    m = dataframe.shape[0]
    n_features = len(sel_features)
    dataset = np.zeros(shape=(m, n_features))

    print(f"Only {n_features} of {dataframe.shape[1]} total feature will be used.")
    print(f"Selected features:", sel_features)

    # Choose only selected features in dataset
    for i, feature in enumerate(sel_features):
        col_values = dataframe[feature].values
        dataset[:, i] = np.array(col_values).transpose()
        dataset[:, i] = dataset[:, i].astype('float32')

    # normalize the dataset
    scaler = MinMaxScaler(feature_range=(0, 1))
    dataset = scaler.fit_transform(dataset)

    # split into train and test sets
    train_size = int(m * train_split)
    train = dataset[0:train_size, :]
    test = dataset[train_size:m, :]

    # reshape into X=t and Y=t+1
    assert len(train) > 0, "Train array length must be > 0"

    mt = train_size
    train_input = np.zeros((mt - look_back, look_back, n_features))
    train_target = np.zeros((mt - look_back, n_features))
    for i in range(n_features):
        train_col = train[:, i]
        train_col = np.array([train_col]).transpose()
        col_train_input, col_train_target = create_dataset(train_col, look_back)
        train_input[:, :, i] = col_train_input
        train_target[:, i] = col_train_target

    mv = test.shape[0]
    test_input = np.zeros((mv - look_back, look_back, n_features))
    test_target = np.zeros((mv - look_back, n_features))
    for i in range(n_features):
        test_col = test[:, i]
        test_col = np.array([test_col]).transpose()
        col_test_input, col_test_target = create_dataset(test_col, look_back)
        test_input[:, :, i] = col_test_input
        test_target[:, i] = col_test_target

    print(f"Train input:target shape = {train_input.shape}:{train_target.shape}")
    print(f"Test input:target shape = {test_input.shape}:{test_target.shape}")

    # Input has to be in form [samples, time steps, sel_features]
    #   - Samples. One sequence is one sample. A batch is comprised of one or more samples.
    #   - Time Steps. One time step is one point of observation in the sample.
    #   - Features. One feature is one observation at a time step.

    # create and fit the LSTM network
    model = Sequential()
    model.add(LSTM(lstm_units, input_shape=(look_back, n_features)))
    model.add(Dense(n_features))
    model.compile(loss='mean_squared_error', optimizer='adam')
    model.summary()

    model_path = f'models/{plot_prefix}_model'
    if force_retrain is False and os.path.isdir(model_path):
        print('Model checkpoint found, skipping training')
        print(f'Loading model {model_path}...')
        model = keras.models.load_model(model_path)
        print(f'Model loaded')
    else:
        if force_retrain:
            print('Force retrain set. Use force_retrain=False to keep trained model.')

        print('Training model...')
        model.fit(train_input, train_target, epochs=epochs, batch_size=batch_size, verbose=verbose)
        model.save(model_path)

    # make predictions
    train_predict = model.predict(train_input)
    test_predict = model.predict(test_input)

    rec_test_predict = np.zeros_like(test_target)
    # Recursively:
    # On the first step we have look_back known values and predict value look_back+1; after that we continue from the predictions.
    # 0,1,2 -> p1
    # 1,2,p1 -> p2
    # 2,p1,p2 -> p3
    # p1,p2,p3 -> p4
    # With lookback L there are L steps where the validation values are used. Strictly, we should probably
    # seed these with the last training values instead.

    rec_test_predict[0:look_back, :] = test_input[0, :, :]

    for i in range(look_back, mv - look_back):
        pred_input = np.expand_dims(rec_test_predict[i - look_back:i, :], axis=0)
        current_prediction = model.predict(pred_input)
        rec_test_predict[i, :] = current_prediction

    # invert predictions
    train_predict = scaler.inverse_transform(train_predict)
    train_target = scaler.inverse_transform(train_target)
    test_predict = scaler.inverse_transform(test_predict)
    test_target = scaler.inverse_transform(test_target)
    rec_test_predict = scaler.inverse_transform(rec_test_predict)

    train_score = np.zeros(n_features)
    test_score = np.zeros(n_features)

    nrows = int(n_features / ncols)
    figsize = (9 * ncols, 6 * nrows)
    fig, axes = plt.subplots(
        nrows=nrows, figsize=figsize, ncols=ncols, dpi=160, facecolor="w", edgecolor="k"
    )
    fig.suptitle(f'Predictions, lookback={look_back}, {epochs} epochs')
    for i in range(n_features):
        # calculate root mean squared error
        train_score[i] = math.sqrt(mean_squared_error(train_target[:, i], train_predict[:, i]))
        print(f'Train Score {sel_features[i]}: {train_score[i]} RMSE')
        test_score[i] = math.sqrt(mean_squared_error(test_target[:, i], test_predict[:, i]))
        print(f'Test Score {sel_features[i]}: {test_score[i]} RMSE')

        # shift train predictions for plotting
        trainPredictPlot = np.empty_like(dataset)
        trainPredictPlot[:, :] = np.nan
        trainPredictPlot[look_back:train_size, i] = train_predict[:, i]
        # shift test predictions for plotting
        testPredictPlot = np.empty_like(dataset)
        testPredictPlot[:, :] = np.nan
        testPredictPlot[train_size + look_back:len(dataset), i] = test_predict[:, i]
        t_dataset = scaler.inverse_transform(dataset)

        # shift recursive predictions for plotting
        rec_testPredictPlot = np.empty_like(dataset)
        rec_testPredictPlot[:, :] = np.nan
        rec_testPredictPlot[train_size + look_back:len(dataset), i] = rec_test_predict[:, i]

        # Plot
        if n_features > 1:
            row = int(i // ncols)
            col = i % ncols
            cur_axes = axes[row, col]
        else:
            cur_axes = axes

        cur_axes.set_title(f"{sel_features[i]}")
        cur_axes.plot(t_dataset[:, i], label=f"{sel_features[i]}", linestyle="-")
        cur_axes.plot(trainPredictPlot[:, i], label="Train predictions", linestyle="-", fillstyle='none')
        cur_axes.plot(testPredictPlot[:, i], label="Validation predictions", linestyle="-", fillstyle='none')
        cur_axes.plot(rec_testPredictPlot[:, i], label="Recursive predictions", linestyle="-", fillstyle='none')
        cur_axes.legend()

    plt.savefig(f'{PLOT_DIR}/{plot_prefix}_predictions_{look_back}_{epochs}.png')
    plt.show()
    plt.close()

    df = pd.DataFrame({"Train RMSE": train_score, "Validation RMSE": test_score}, index=sel_features)
    ax = df.plot.bar(color=["SkyBlue", "IndianRed"], rot=0, title=f"RMSE, lookback={look_back}, {epochs} epochs")
    ax.set_xlabel("Feature")
    ax.set_xticklabels(sel_features, rotation=45)
    plt.tight_layout()
    plt.savefig(f'{PLOT_DIR}/{plot_prefix}_RMSE_{look_back}_{epochs}.png', bbox_inches="tight")
    plt.show()
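The create_dataset helper called in this example is not shown; a common sliding-window implementation that is consistent with the (m - look_back, look_back) and (m - look_back,) shapes used above would look like the following (an assumption, not the original helper).

import numpy as np

def create_dataset(dataset, look_back=1):
    """Split an (m, 1) series into look_back-long input windows and next-step targets."""
    data_x, data_y = [], []
    for i in range(len(dataset) - look_back):
        data_x.append(dataset[i:(i + look_back), 0])
        data_y.append(dataset[i + look_back, 0])
    return np.array(data_x), np.array(data_y)   # shapes: (m - look_back, look_back), (m - look_back,)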
Example #15
print('Loading data...')
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)
print(len(x_train), 'train sequences')
print(len(x_test), 'test sequences')

print('Pad sequences (samples x time)')
x_train = sequence.pad_sequences(x_train, maxlen=maxlen)
x_test = sequence.pad_sequences(x_test, maxlen=maxlen)
print('x_train shape:', x_train.shape)
print('x_test shape:', x_test.shape)

print('Build model...')
model = Sequential()
model.add(Embedding(max_features, 128))
model.add(LSTM(128, dropout=0.2, recurrent_dropout=0.2))
model.add(Dense(1, activation='sigmoid'))

# try using different optimizers and different optimizer configs
model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

print('Train...')
model.fit(x_train,
          y_train,
          batch_size=batch_size,
          epochs=epochs,
          validation_data=(x_test, y_test))
score, acc = model.evaluate(x_test, y_test, batch_size=batch_size)
print('Test score:', score)
Example #16
# print(y_test.shape)
# (3264, 15)
# (3264,)
# (816, 15)
# (816,)

# ------------------------------------------------------------
# Build the model
from tensorflow.keras.layers import Embedding, Dense, LSTM
from tensorflow.keras.models import Sequential, load_model
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint

# embedding layer followed by an LSTM
model = Sequential()
model.add(Embedding(vocab_size, 50))
model.add(LSTM(128))
model.add(Dense(1, activation='sigmoid'))

# define callbacks
stop = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=8)
file_path = '../NLP/modelcheckpoint/project_01.h5'
mc = ModelCheckpoint(filepath=file_path,
                     monitor='val_acc',
                     mode='max',
                     save_best_only=True,
                     verbose=1)

# compile and train
model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['acc'])
history = model.fit(x_train,
                    y_train,
Example #17
def prep_classifier():

    model = Sequential()

    model.add(Reshape((16, 16)))
    model.add(Bidirectional(LSTM(16, return_sequences=True)))
    model.add(Bidirectional(LSTM(16, return_sequences=True), merge_mode="ave"))

    model.add(Reshape((256, 1)))

    model.add(Conv1D(16, kernel_size=3, padding="same"))
    model.add(BatchNormalization(momentum=batch_norm))
    model.add(Activation("relu"))
    model.add(MaxPooling1D())
    model.add(Dropout(dropout))

    model.add(Conv1D(32, kernel_size=3, padding="same"))
    model.add(BatchNormalization(momentum=batch_norm))
    model.add(Activation("relu"))
    model.add(MaxPooling1D())
    model.add(Dropout(dropout))

    model.add(Conv1D(64, kernel_size=3, padding="same"))
    model.add(BatchNormalization(momentum=batch_norm))
    model.add(Activation("relu"))
    model.add(MaxPooling1D())
    model.add(Dropout(dropout))

    model.add(Conv1D(128, kernel_size=3, padding="same"))
    model.add(BatchNormalization(momentum=batch_norm))
    model.add(Activation("relu"))
    model.add(MaxPooling1D())
    model.add(Dropout(dropout))

    model.add(Flatten())
    qdata = Sequential()
    qdata.add(Embedding(164, 64, input_length=3))
    qdata.add(Flatten())

    join = Sequential()
    join.add(Concatenate())

    ###TEST###########################################
    join.add(Dense(1024))
    join.add(BatchNormalization(momentum=batch_norm))
    join.add(Activation("relu"))
    ###TEST###########################################

    join.add(Dense(512))
    join.add(BatchNormalization(momentum=batch_norm))
    join.add(Activation("relu"))
    join.add(Dense(64))
    join.add(BatchNormalization(momentum=batch_norm))
    join.add(Activation("relu"))

    join.add(Dense(9, activation="softmax"))

    signal = Input(shape=(256, 1))
    qualdata = Input(shape=(3, ))
    feat_signal = model(signal)
    feat_qdata = qdata(qualdata)

    out = join([feat_signal, feat_qdata])

    classifier = Model([signal, qualdata], out)

    return classifier
Example #18
    ytrain = data[:len_train, -1, :]

    xtest = data[len_train:, :-1, :]
    ytest = data[len_train:, -1, :]

    return xtrain, ytrain, xtest, ytest


xtrain, ytrain, xtest, ytest = getprocessedvalues(scaled_price, 20)

from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, Dense, Activation

model = Sequential()
model.add(LSTM(
    30,
    input_shape=(19, 1)))  #input_shape=(length of sequence,num of features)
model.add(Dense(1, activation='linear'))
model.compile(optimizer='adam',
              loss='mean_squared_error',
              metrics=['accuracy'])

hist = model.fit(xtrain,
                 ytrain,
                 epochs=100,
                 batch_size=30,
                 validation_data=(xtest, ytest))
model.evaluate(xtest, ytest)

from tensorflow.keras.models import load_model
Example #19
Y = []
for t in range(len(series) - T):
	x = series[t:t+T]
	X.append(x)
	y = series[t+T]
	Y.append(y)

X = np.array(X).reshape(-1,T, 1)#N*T*D
Y = np.array(Y)
N = len(X)
print(X.shape, Y.shape)


#Build the model............
i = Input(shape = (T, D))
x = LSTM(10)(i)
x = Dense(1)(x)
model = Model(i, x)

model.compile(loss = 'mse', optimizer = Adam(lr=0.5))
r = model.fit(X[:-N//2], Y[:-N//2], epochs = 50, validation_data = (X[-N//2:], Y[-N//2:]),)

#Plot the loss................
plt.plot(r.history['loss'], label = 'loss')
plt.plot(r.history['val_loss'], label = 'val_loss')
plt.legend()
plt.show()


#wrong methods for forecasting......................
validation_target = Y[-N//2:]
Example #20
def LSTMModel(x_train, y_train, x_test, y_test, number_of_memories):

    np.random.seed(7)
    np.random.shuffle(x_train)
    np.random.seed(7)
    np.random.shuffle(y_train)
    tf.random.set_seed(7)

    model = tf.keras.Sequential([
        LSTM(number_of_memories, return_sequences=True),
        Dropout(0.2),
        LSTM(100),
        Dropout(0.2),
        Dense(10)
    ])

    model.compile(optimizer=tf.keras.optimizers.Adam(0.001), loss='huber_loss')

    checkpoint_save_path = "./checkpoint/DCG_LPL.ckpt"

    cp_callback = tf.keras.callbacks.ModelCheckpoint(
        filepath=checkpoint_save_path,
        save_weights_only=True,
        save_best_only=True,
        monitor='val_loss')

    history = model.fit(x_train,
                        y_train,
                        batch_size=64,
                        epochs=1,
                        validation_data=(x_test, y_test),
                        validation_freq=1)

    model.summary()
    '''
    file = open('./weights.txt', 'w')  
    for v in model.trainable_variables:
        file.write(str(v.name) + '\n')
        file.write(str(v.shape) + '\n')
        file.write(str(v.numpy()) + '\n')
    file.close()

    loss = history.history['loss']
    val_loss = history.history['val_loss'] 

    plt.plot(loss, label='Training Loss')
    plt.plot(val_loss, label='Validation Loss')
    plt.title('Training and Validation Loss')
    plt.legend()
    plt.show()
    '''
    ################## predict ######################

    predicted_bb = model.predict(x_test)
    '''
    plt.plot(y_test[:100], color='red', label='MaoTai Stock Price')
    plt.plot(predicted_bb[:100], color='blue', label='Predicted MaoTai Stock Price')
    plt.title('MaoTai Stock Price Prediction')
    plt.xlabel('Time')
    plt.ylabel('MaoTai Stock Price')
    plt.legend()
    plt.show()
    '''
    #del model
    return predicted_bb
Example #21
    y_train.append(train_data[i, 0])
    if i <= 61:
        print(x_train)
        print(y_train)
        print()
"""using numpy to create train and test data"""

x_train, y_train = np.array(x_train), np.array(y_train)

x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], 1))
x_train.shape
"""Building the LSTM network model"""

#Build the LSTM model
model = Sequential()
model.add(LSTM(50, return_sequences=True, input_shape=(x_train.shape[1], 1)))
model.add(LSTM(50, return_sequences=False))
model.add(Dense(25))
model.add(Dense(1))

#Compile the model
model.compile(optimizer='adam', loss='mean_squared_error')

model.fit(x_train, y_train, batch_size=1, epochs=1)

#Create a new array containing scaled values from index 1543 to 2002
test_data = scaled_data[training_data_len - 60:, :]
#Create the data sets x_test and y_test
x_test = []
y_test = dataset[training_data_len:, :]
for i in range(60, len(test_data)):
Example #22
                                random_index = np.random.permutation(25000)
                                x_train = x_train_all[random_index[:20000]]
                                y_train = y_train_all[random_index[:20000]]
                                x_val = x_train_all[random_index[20000:]]
                                y_val = y_train_all[random_index[20000:]]
                                maxlength = Max
                                x_test = sequence.pad_sequences(
                                    x_test, maxlen=maxlength)
                                x_train_seq = sequence.pad_sequences(
                                    x_train, maxlen=maxlength)
                                x_val_seq = sequence.pad_sequences(
                                    x_val, maxlen=maxlength)

                                model_lstm = Sequential()
                                model_lstm.add(Embedding(Dict, Emb))
                                model_lstm.add(LSTM(cell))
                                model_lstm.add(Dense(1, activation='sigmoid'))
                                model_lstm.compile(optimizer='adam',
                                                   loss='binary_crossentropy',
                                                   metrics=['accuracy'])
                                model_lstm.summary()
                                print("max length :", maxlength, "Vocab size",
                                      Dict, "Embedded size", Emb, "Cell", cell)
                                history = model_lstm.fit(
                                    x_train_seq,
                                    y_train,
                                    epochs=10,
                                    batch_size=128,
                                    validation_data=(x_val_seq, y_val))
                                print("training end!")
                                loss, accuracy = model_lstm.evaluate(x_test,
        context_vector = tf.reduce_sum(context_vector, axis=1)

        return context_vector, attention_weights

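# The three lines above are the tail of an attention layer's call() method. Below is a plausible
# reconstruction of the BahdanauAttention class they appear to belong to: an assumption based on
# the standard additive (Bahdanau) attention pattern, not code recovered from the original source.
import tensorflow as tf
from tensorflow.keras.layers import Dense

class BahdanauAttention(tf.keras.Model):
    def __init__(self, units):
        super(BahdanauAttention, self).__init__()
        self.W1 = Dense(units)
        self.W2 = Dense(units)
        self.V = Dense(1)

    def call(self, values, query):
        # values: (batch, time, hidden), query: (batch, hidden)
        hidden_with_time_axis = tf.expand_dims(query, 1)
        score = self.V(tf.nn.tanh(self.W1(values) + self.W2(hidden_with_time_axis)))
        attention_weights = tf.nn.softmax(score, axis=1)
        context_vector = attention_weights * values
        context_vector = tf.reduce_sum(context_vector, axis=1)
        return context_vector, attention_weights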

from tensorflow.keras.layers import Dense, Embedding, Bidirectional, LSTM, Concatenate, Dropout
from tensorflow.keras import Input, Model
from tensorflow.keras import optimizers
import os

sequence_input = Input(shape=(max_len, ), dtype='int32')
embedded_sequences = Embedding(vocab_size,
                               128,
                               input_length=max_len,
                               mask_zero=True)(sequence_input)
lstm = Bidirectional(LSTM(64, dropout=0.5,
                          return_sequences=True))(embedded_sequences)
lstm, forward_h, forward_c, backward_h, backward_c = Bidirectional \
  (LSTM(64, dropout=0.5, return_sequences=True, return_state=True))(lstm)

state_h = Concatenate()([forward_h, backward_h])  # hidden state
state_c = Concatenate()([forward_c, backward_c])  # cell state

attention = BahdanauAttention(64)  # define the attention size
context_vector, attention_weights = attention(lstm, state_h)
dense1 = Dense(20, activation="relu")(context_vector)
dropout = Dropout(0.5)(dense1)
output = Dense(1, activation="sigmoid")(dropout)
model = Model(inputs=sequence_input, outputs=output)
model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
Example #24
[1. 0. 0.]
 [1. 0. 0.]
 [1. 0. 0.]
 [1. 0. 0.]
 [1. 0. 0.]
 [1. 0. 0.]
 [1. 0. 0.]
 [1. 0. 0.]
'''

#2. Model
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, LSTM

model = Sequential()
model.add(LSTM(10, activation='relu', input_shape=(4, 1)))
model.add(Dense(5, activation='relu'))
model.add(Dense(3, activation='softmax'))  #y has 3 classes (0, 1, 2)
#the number of output nodes should match the number of classes to predict
#linear: linear output, relu: regression, sigmoid: binary classification

#3. Compile and train
#mean_squared_error
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['acc', 'mae'])
####when the problem is binary classification, use binary_crossentropy as the loss (outputs only 0 and 1)
model.fit(x_train,
          y1_train,
          epochs=150,
          validation_split=0.2,
def build_neural_network_model(Recurrent_Neural_Network: List[Any],
                               n_inputs: int, n_days: int) -> Sequential:
    """
    Builds neural net from config_neural_network_models.py
    Parameters
    ----------
    Recurrent_Neural_Network: List[Any]
        List of layers with parameters as a dictionary in the file
    n_inputs: int
        Number of days that will be fed into the NN
    n_days: int
        Number of days the NN wants to predict

    Returns
    -------
    model: Sequential
        Keras sequential model with layers from the file

    """
    model = Sequential()

    for idx_layer, d_layer in enumerate(Recurrent_Neural_Network):
        # Recurrent Neural Network
        if str(*d_layer) == "SimpleRNN":
            # Is this the input layer? If so, define input_shape
            if idx_layer == 0:
                model.add(
                    SimpleRNN(**d_layer["SimpleRNN"],
                              input_shape=(n_inputs, 1)))
            # Is this the last output layer? If so, set units to prediction days
            elif idx_layer == (len(Recurrent_Neural_Network) - 1):
                model.add(SimpleRNN(**d_layer["SimpleRNN"], units=n_days))
            else:
                model.add(SimpleRNN(**d_layer["SimpleRNN"]))

        # Long-Short Term-Memory
        elif str(*d_layer) == "LSTM":
            # Is this the input layer? If so, define input_shape
            if idx_layer == 0:
                model.add(LSTM(**d_layer["LSTM"], input_shape=(n_inputs, 1)))
            # Is this the last output layer? If so, set units to prediction days
            elif idx_layer == (len(Recurrent_Neural_Network) - 1):
                model.add(LSTM(**d_layer["LSTM"], units=n_days))
            else:
                model.add(LSTM(**d_layer["LSTM"]))

        # Dense (Simple Neuron)
        elif str(*d_layer) == "Dense":
            # Is this the input layer? If so, define input_shape
            if idx_layer == 0:
                model.add(Dense(**d_layer["Dense"], input_dim=n_inputs))
            # Is this the last output layer? If so, set units to prediction days
            elif idx_layer == (len(Recurrent_Neural_Network) - 1):
                model.add(Dense(**d_layer["Dense"], units=n_days))
            else:
                model.add(Dense(**d_layer["Dense"]))

        # Conv1D Layer
        elif str(*d_layer) == "Conv1D":
            if idx_layer == 0:
                model.add(
                    Conv1D(**d_layer["Conv1D"], input_shape=(n_inputs, 1)))
            else:
                model.add(Conv1D(**d_layer["Conv1D"]))
        # Max Pooling Layer for after Conv Layer
        elif str(*d_layer) == "MaxPool1D":
            model.add(MaxPool1D(**d_layer["MaxPool1D"]))
        # Allow for if user wants to do average pooling
        elif str(*d_layer) == "AvgPool1D":
            model.add(AvgPool1D(**d_layer["AvgPool1D"]))
        # Dropout (Regularization)
        elif str(*d_layer) == "Dropout":
            model.add(Dropout(**d_layer["Dropout"]))
        # Flatten layer for Convolutions
        elif str(*d_layer) == "Flatten":
            model.add(Flatten())
        else:
            print(f"Incorrect neuron type: {str(*d_layer)}")

    return model
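A hedged example of the configuration this function expects: a list of single-key dicts mapping a layer type to its keyword arguments, in the spirit of config_neural_network_models.py described in the docstring. The layer sizes and the compile step below are placeholders, not values from the original file.

# Each entry is a single-key dict: layer type -> keyword arguments for that layer.
Recurrent_Neural_Network = [
    {"LSTM": {"units": 25, "activation": "tanh", "return_sequences": True}},
    {"Dropout": {"rate": 0.2}},
    {"LSTM": {"units": 15, "activation": "tanh", "return_sequences": False}},
    {"Dense": {"activation": "linear"}},   # final layer: units are set to n_days by the builder
]

model = build_neural_network_model(Recurrent_Neural_Network, n_inputs=40, n_days=5)
model.compile(optimizer="adam", loss="mae")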
Example #26
    load_model = json_file.read()
    json_file.close()
    model = model_from_json(load_model)
    # load weights into new model
    model.load_weights("model.LSTM")
except:
    model = Sequential()
    # Non-trainable embedding layer
    model.add(
        Embedding(vocab_size,
                  output_dim=EMBEDDING_DIM,
                  weights=[embedding_vectors],
                  input_length=maxlen,
                  trainable=False))
    # LSTM
    model.add(LSTM(units=128))
    model.add(Dense(1, activation='sigmoid'))
    model.compile(optimizer='adam',
                  loss='binary_crossentropy',
                  metrics=['acc'])
    model.fit(X_train, y_train, validation_split=0.3, epochs=6)

    model_json = model.to_json()
    with open("model.json", "w") as json_file:
        json_file.write(model_json)
    # serialize weights to LSTM
    model.save_weights("model.LSTM")


#Train test
def trainLSTM(X_test, y_test):
def create_network(n_commands,
                   n_value1,
                   n_durations,
                   embed_size=100,
                   rnn_units=256,
                   use_attention=False):
    """ create the structure of the neural network """

    commands_in = Input(shape=(None, ), name="commands_channels_in")
    value1_in = Input(shape=(None, ), name="values1_in")
    durations_in = Input(shape=(None, ), name="durations_in")

    x1 = Embedding(n_commands, embed_size)(commands_in)
    x2 = Embedding(n_value1, embed_size)(value1_in)
    x3 = Embedding(n_durations, embed_size)(durations_in)

    x = Concatenate()([x1, x2, x3])

    x = LSTM(rnn_units, return_sequences=True)(x)
    #x = Dropout(0.2)(x)

    if use_attention:

        x = LSTM(rnn_units, return_sequences=True)(x)
        #x = Dropout(0.2)(x)

        e = Dense(1, activation='tanh')(x)
        e = Reshape([-1])(e)
        alpha = Activation('softmax')(e)

        alpha_repeated = Permute([2, 1])(RepeatVector(rnn_units)(alpha))

        c = Multiply()([x, alpha_repeated])
        c = Lambda(lambda xin: K.sum(xin, axis=1),
                   output_shape=(rnn_units, ))(c)

    else:
        c = LSTM(rnn_units)(x)
        #c = Dropout(0.2)(c)

    commands_out = Dense(n_commands,
                         activation='softmax',
                         name='commands_channels_out')(c)
    value1_out = Dense(n_value1, activation='softmax', name='values1_out')(c)
    durations_out = Dense(n_durations,
                          activation='softmax',
                          name='durations_out')(c)

    model = Model([commands_in, value1_in, durations_in],
                  [commands_out, value1_out, durations_out])

    if use_attention:
        att_model = Model([commands_in, value1_in, durations_in], alpha)
    else:
        att_model = None

    opti = RMSprop(lr=0.005)
    model.compile(loss=[
        'categorical_crossentropy', 'categorical_crossentropy',
        'categorical_crossentropy'
    ],
                  optimizer=opti)

    return model, att_model
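A hedged call sketch for the builder above; the vocabulary sizes are placeholders for whatever command/value/duration token counts the surrounding project actually uses.

model, att_model = create_network(n_commands=32,
                                  n_value1=128,
                                  n_durations=64,
                                  embed_size=100,
                                  rnn_units=256,
                                  use_attention=True)
model.summary()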
Example #28
    padded_sentence_target_index[0:pad_len] = pad_len * [target_index['<PAD>']]
    padded_sentence_target_index.extend(sent_target_idx[:30])
    padded_sentence_target_indexes.append(padded_sentence_target_index)

import numpy as np
import tensorflow as tf
from tensorflow.keras.datasets import imdb
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import LSTM, SimpleRNN, GRU, TimeDistributed
from tensorflow.keras.layers import Embedding
from tensorflow.keras.preprocessing import sequence

X_train = np.array(padded_sentence_indexes, dtype=float)
y_train = np.array(padded_sentence_target_indexes, dtype=float)

X_train = X_train.reshape(1, X_train.shape[0], X_train.shape[1])
y_train = y_train.reshape(1, y_train.shape[0], y_train.shape[1])

embedding_vecor_length = 80
model2 = Sequential()
model2.add(LSTM(1024, activation='relu', return_sequences=True))
model2.add(TimeDistributed(Dense(30)))

model2.compile(loss='mse', optimizer='adam')

model2.fit(X_train, y_train, epochs=3, batch_size=64)

print(model2.summary())

print("done")
Example #29
print(tokenizer.word_index['one'])
print(tokenizer.word_index['jeremy'])
print(tokenizer.word_index['lanigan'])

print(xs[6])

print(ys[6])

print(xs[5])
print(ys[5])

print(tokenizer.word_index)

model = Sequential()
model.add(Embedding(total_words, 64, input_length=max_sequence_len-1))
model.add(Bidirectional(LSTM(20)))
model.add(Dense(total_words, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
history = model.fit(xs, ys, epochs=500, verbose=1)

import matplotlib.pyplot as plt


def plot_graphs(history, string):
  plt.plot(history.history[string])
  plt.xlabel("Epochs")
  plt.ylabel(string)
  plt.show()

plot_graphs(history, 'accuracy')
Example #30
def model():
    df, last_date = t1.get_timeline()
    # print(b)
    df = df.drop(['total_deaths', 'new_deaths', 'iso_code', 'continent'],
                 axis=1)
    df = df[45:]  #removing rows where there are no new cases
    df = df.reset_index()

    df1 = df['new_cases']
    scaler = MinMaxScaler()
    df1 = scaler.fit_transform(np.array(df1).reshape(-1, 1))

    ##splitting dataset into train and test split
    training_size = int(len(df1) * 0.75)
    test_size = len(df1) - training_size
    train_data, test_data = df1[0:training_size, :], df1[
        training_size:len(df1), :1]

    # reshape into X=t,t+1,t+2,t+3 and Y=t+4
    time_step = 3
    X_train, y_train = create_dataset(train_data, time_step)
    X_test, y_test = create_dataset(test_data, time_step)
    # print(y_test)

    # reshape input to be [samples, time steps, features] which is required for LSTM
    X_train = X_train.reshape(X_train.shape[0], X_train.shape[1], 1)
    X_test = X_test.reshape(X_test.shape[0], X_test.shape[1], 1)
    # X_test.shape

    model = Sequential()
    model.add(
        LSTM(128,
             activation='relu',
             use_bias=True,
             bias_initializer='ones',
             return_sequences=True,
             input_shape=(time_step, 1)))
    model.add(Dropout(0.2))
    model.add(LSTM(64, return_sequences=True, activation='relu'))
    model.add(Dropout(0.2))
    model.add(LSTM(50))
    model.add(Dropout(0.2))
    model.add(Dense(1))
    opt = keras.optimizers.Adam(learning_rate=0.001)
    model.compile(loss='mean_squared_error', optimizer=opt)

    model.fit(X_train,
              y_train,
              validation_data=(X_test, y_test),
              epochs=100,
              steps_per_epoch=26,
              batch_size=16,
              verbose=1)
    train_predict = model.predict(X_train)
    test_predict = model.predict(X_test)

    ##Transform back to original form
    train_predict = scaler.inverse_transform(train_predict)
    test_predict = scaler.inverse_transform(test_predict)
    y_train = scaler.inverse_transform(y_train.reshape(-1, 1))
    y_test = scaler.inverse_transform(y_test.reshape(-1, 1))

    #plotting total data and predicted data
    alll = scaler.inverse_transform(df1).tolist()
    predicted = train_predict.tolist() + test_predict.tolist()
    plot_graph('total_plot', alll[80:], predicted[80:])

    #plotting the training prediction graph
    plot_graph('train_plot', y_train[80:], train_predict[80:])

    #plotting test prediction graph
    plot_graph('test_plot', y_test, test_predict)

    # prediction for future data
    x_input = test_data[len(test_data) - 3:].reshape(1, -1)
    temp_input = list(x_input)
    temp_input = temp_input[0].tolist()

    lst_output = []
    n_steps = 3
    i = 0
    while (i < 1):

        if (len(temp_input) > 3):
            x_input = np.array(temp_input[1:])
            x_input = x_input.reshape(1, -1)
            x_input = x_input.reshape((1, n_steps, 1))
            yhat = model.predict(x_input, verbose=0)
            temp_input.extend(yhat[0].tolist())
            temp_input = temp_input[1:]
            lst_output.extend(yhat.tolist())
            i = i + 1
        else:
            x_input = x_input.reshape((1, n_steps, 1))
            yhat = model.predict(x_input, verbose=0)
            temp_input.extend(yhat[0].tolist())
            lst_output.extend(yhat.tolist())
            i = i + 1

    prev = df['new_cases'][len(df) - 5:].tolist()
    new = scaler.inverse_transform(np.array(lst_output[0][0]).reshape(-1, 1))
    new = math.floor(new[0][0])
    data = alll[:len(predicted)]

    print("RMSE:", math.sqrt(mean_squared_error(data, predicted)))
    print("train mse:", math.sqrt(mean_squared_error(y_train, train_predict)))
    print("test mse:", math.sqrt(mean_squared_error(y_test, test_predict)))
    return (prev, new, math.sqrt(mean_squared_error(data, predicted)))
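The plot_graph helper used throughout model() is not shown in the snippet; a minimal sketch consistent with how it is called (a plot name plus actual and predicted series) is given below. This is an assumption about the helper, not the original implementation.

import matplotlib.pyplot as plt

def plot_graph(name, actual, predicted):
    """Save a comparison plot of actual vs. predicted values under the given name."""
    plt.figure()
    plt.plot(actual, label='actual')
    plt.plot(predicted, label='predicted')
    plt.legend()
    plt.savefig(f'{name}.png')
    plt.close()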