y_train

# In[22]:

one_hot_vec_size = y_train.shape[1]
one_hot_vec_size

# ## 3. Build the model

# In[23]:

model = Sequential()
model.add(
    LSTM(units=128,
         kernel_initializer='glorot_normal',
         bias_initializer='zero',
         batch_input_shape=(1, n_steps, n_inputs),
         stateful=True))
model.add(
    Dense(units=one_hot_vec_size,
          kernel_initializer='glorot_normal',
          bias_initializer='zero',
          activation='softmax'))

# ## 4. Configure the training process

# In[24]:

model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
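# Not part of the original snippet: with stateful=True and batch_input_shape=(1, n_steps, n_inputs),
# training usually resets the carried-over LSTM state between epochs. A minimal sketch, assuming
# x_train/y_train were prepared earlier in the notebook; the epoch count and the tensorflow.keras
# import path are assumptions, not from the source.
from tensorflow.keras.callbacks import Callback

class ResetStatesCallback(Callback):
    def on_epoch_end(self, epoch, logs=None):
        self.model.reset_states()  # clear the LSTM state carried across batches

model.fit(x_train, y_train,
          epochs=2000, batch_size=1, shuffle=False,
          callbacks=[ResetStatesCallback()])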
Example #2
x_train = scaler.transform(x_train)
x_val = scaler.transform(x_val)
x_test = scaler.transform(x_test)
# print(x_train.shape)
# print(x_test.shape)
# print(x_val.shape)

x_train = x_train.reshape(323, 13, 1)
x_test = x_test.reshape(102, 13, 1)
x_val = x_val.reshape(81, 13, 1)

#2. Build the model
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Dense, Input, LSTM
inputs = Input(shape=(13, 1))
dense1 = LSTM(50, activation='relu')(inputs)
dense1 = Dense(100)(dense1)
dense1 = Dense(100)(dense1)
dense1 = Dense(200)(dense1)
dense1 = Dense(100)(dense1)
dense1 = Dense(50)(dense1)
outputs = Dense(1)(dense1)

model = Model(inputs=inputs, outputs=outputs)

#3. Compile and train
model.compile(loss='mse', optimizer='adam', metrics=['mae'])
model.fit(x_train,
          y_train,
          batch_size=16,
          epochs=300)
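# Not in the original (the fit call above is cut off in the source); a sketch of the usual follow-up,
# assuming the y_val/y_test arrays matching x_val/x_test were prepared earlier.
loss, mae = model.evaluate(x_val, y_val, batch_size=16)
print("val loss:", loss, "val mae:", mae)
y_pred = model.predict(x_test)
print(y_pred.shape)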
Example #3
y1 = array([4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 50, 60, 70])
y2 = array([4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 50, 60, 70])

x1_predict = array([55, 65, 75])
x2_predict = array([65, 75, 85])

x1 = x1.reshape(x1.shape[0], x1.shape[1], 1)
x2 = x2.reshape(x2.shape[0], x2.shape[1], 1)

#2. Build the model
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Dense, Input, LSTM

# Model 1
input1 = Input(shape=(3, 1))
dense1 = LSTM(16, activation='relu')(input1)
dense1 = Dense(40, activation='relu')(dense1)
dense1 = Dense(80, activation='relu')(dense1)
dense1 = Dense(160, activation='relu')(dense1)
dense1 = Dense(80, activation='relu')(dense1)
dense1 = Dense(40, activation='relu')(dense1)
dense1 = Dense(20, activation='relu')(dense1)
# output1 = Dense(3)(dense1)

# Model 2
input2 = Input(shape=(3, 1))
dense2 = LSTM(16, activation='relu')(input2)
dense2 = Dense(1)(dense2)
# output2 = Dense(3)(dense2)

# Merge the models / concatenate
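# The snippet is cut off at the merge step it announces; a minimal sketch of that step, assuming the
# dense1/dense2 branch outputs defined above (the layer sizes and the two output heads are illustrative).
from tensorflow.keras.layers import concatenate

merged = concatenate([dense1, dense2])
middle = Dense(32, activation='relu')(merged)
output1 = Dense(1)(middle)  # prediction head for y1
output2 = Dense(1)(middle)  # prediction head for y2

model = Model(inputs=[input1, input2], outputs=[output1, output2])
model.summary()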
Example #4
    def Make_block(
            BLOCK_TYPE,
            BLOCK,
            DROPOUT_RATE,  #FULLY_CONNECTED,
            NUM_FILTERS,
            KERNEL_SIZE,
            KERNEL_STRIDE,
            POOL_STRIDE,
            POOL_SIZE,
            PADDING,
            input_dim,
            model_type=None,
            end_seq=None):
        """
        if model_type == "CNN":
        
            # First layer: Include input dim
            if BLOCK_TYPE == 1 and BLOCK == 1:
                model.add(Conv1D(NUM_FILTERS, kernel_size=KERNEL_SIZE, padding='valid', activation='relu', 
                             strides=KERNEL_STRIDE, use_bias=True, input_shape=input_dim))
                model.add(MaxPooling1D(pool_size=POOL_SIZE, strides=POOL_STRIDE))
               
            # No need to specify input dim
            if BLOCK_TYPE == 1 and BLOCK > 1:
                model.add(Conv1D(NUM_FILTERS, kernel_size=KERNEL_SIZE, padding='valid', activation='relu', strides=KERNEL_STRIDE, use_bias=True, input_shape=input_dim))
                model.add(MaxPooling1D(pool_size=POOL_SIZE, strides=POOL_STRIDE))
        
            if BLOCK_TYPE == 2 and BLOCK == 1:
                model.add(Conv1D(NUM_FILTERS, kernel_size=KERNEL_SIZE, padding='valid', activation='relu', 
                             strides=KERNEL_STRIDE, use_bias=True, input_shape=input_dim))
                model.add(MaxPooling1D(pool_size=POOL_SIZE, strides=POOL_STRIDE))
                model.add(BatchNormalization())
                
            if BLOCK_TYPE == 2 and BLOCK > 1:
                model.add(Conv1D(NUM_FILTERS, kernel_size=KERNEL_SIZE, padding='valid', activation='relu', strides=KERNEL_STRIDE, use_bias=True, input_shape=input_dim))
                model.add(MaxPooling1D(pool_size=POOL_SIZE, strides=POOL_STRIDE))
                model.add(BatchNormalization())
                
            if BLOCK_TYPE == 3 and BLOCK == 1:
                model.add(Conv1D(NUM_FILTERS, kernel_size=KERNEL_SIZE, padding='valid', activation='relu', 
                             strides=KERNEL_STRIDE, use_bias=True, input_shape=input_dim))
                model.add(MaxPooling1D(pool_size=POOL_SIZE, strides=POOL_STRIDE))
                model.add(Dropout(DROPOUT_RATE))
                
            if BLOCK_TYPE == 3 and BLOCK > 1:
                model.add(Conv1D(NUM_FILTERS, kernel_size=KERNEL_SIZE, padding='valid', activation='relu', strides=KERNEL_STRIDE, use_bias=True, input_shape=input_dim))
                model.add(MaxPooling1D(pool_size=POOL_SIZE, strides=POOL_STRIDE))
                model.add(Dropout(DROPOUT_RATE))
            
            if BLOCK_TYPE == 4 and BLOCK == 1:
                model.add(Conv1D(NUM_FILTERS, kernel_size=KERNEL_SIZE, padding='valid', activation='relu', 
                             strides=KERNEL_STRIDE, use_bias=True, input_shape=input_dim))
                model.add(MaxPooling1D(pool_size=POOL_SIZE, strides=POOL_STRIDE))
                model.add(BatchNormalization())
                model.add(Dropout(DROPOUT_RATE))
                
            if BLOCK_TYPE == 4 and BLOCK > 1:
                model.add(Conv1D(NUM_FILTERS, kernel_size=KERNEL_SIZE, padding='valid', activation='relu', strides=KERNEL_STRIDE, use_bias=True, input_shape=input_dim))
                model.add(MaxPooling1D(pool_size=POOL_SIZE, strides=POOL_STRIDE))
                model.add(BatchNormalization())
                model.add(Dropout(DROPOUT_RATE))
        
        """
        if model_type == "LSTM":

            if BLOCK == 1:
                FULLY_CONNECTED = FC_BLOCK1
            if BLOCK == 2:
                FULLY_CONNECTED = FC_BLOCK2
            if BLOCK == 3:
                FULLY_CONNECTED = FC_BLOCK3
            if BLOCK == 4:
                FULLY_CONNECTED = FC_BLOCK4

            # For continuation of the recurrent sequences
            if end_seq == False:
                print("type", BLOCK_TYPE, "Seq = T")

                # First layer: Include input dim

                # Basic LSTM with recurrent dropout
                if BLOCK_TYPE == 1:
                    model.add(
                        LSTM(FULLY_CONNECTED,
                             implementation=2,
                             input_shape=input_dim,
                             recurrent_dropout=DROPOUT_RATE,
                             return_sequences=True))

                # LSTM with batchNorm and recurrent dropout
                if BLOCK_TYPE == 2:
                    model.add(
                        LSTM(FULLY_CONNECTED,
                             implementation=2,
                             input_shape=input_dim,
                             recurrent_dropout=DROPOUT_RATE,
                             return_sequences=True))
                    model.add(BatchNormalization())

                # LSTM with no dropout
                if BLOCK_TYPE == 3:
                    model.add(
                        CuDNNLSTM(
                            FULLY_CONNECTED,  #implementation=2, 
                            input_shape=input_dim,
                            return_sequences=True))

                # LSTM with batchNorm and no dropout
                if BLOCK_TYPE == 4:
                    model.add(
                        CuDNNLSTM(
                            FULLY_CONNECTED,  #implementation=2, 
                            input_shape=input_dim,
                            return_sequences=True))
                    model.add(BatchNormalization())

            # For ending the sequence
            if end_seq == True:
                print("type", BLOCK_TYPE, "Seq = F")

                # First layer: Include input dim

                # Basic LSTM with recurrent dropout
                if BLOCK_TYPE == 1:
                    model.add(
                        LSTM(FULLY_CONNECTED,
                             implementation=2,
                             recurrent_dropout=DROPOUT_RATE,
                             input_shape=input_dim,
                             return_sequences=False))

                # LSTM with batchNorm and recurrent dropout
                if BLOCK_TYPE == 2:
                    model.add(
                        LSTM(FULLY_CONNECTED,
                             implementation=2,
                             recurrent_dropout=DROPOUT_RATE,
                             input_shape=input_dim,
                             return_sequences=False))
                    model.add(BatchNormalization())

                # LSTM with no dropout
                if BLOCK_TYPE == 3:
                    model.add(
                        CuDNNLSTM(
                            FULLY_CONNECTED,  #implementation=2, 
                            input_shape=input_dim,
                            return_sequences=False))

                # LSTM with batchNorm and no dropout
                if BLOCK_TYPE == 4:
                    model.add(
                        CuDNNLSTM(
                            FULLY_CONNECTED,  #implementation=2, 
                            input_shape=input_dim,
                            return_sequences=False))
                    model.add(BatchNormalization())

        return
"""

sms_length = 20
embedded_docs = pad_sequences(onehot_repr, padding = 'pre', maxlen = sms_length)
print(embedded_docs)

embedded_docs[0]

"""# **Model Creation** 
LSTM model
"""

embedding_vector_feature = 40  # convert each token of the embedded docs into a 40-dimensional vector (40 features)
model = Sequential()
model.add(Embedding(voc_size, embedding_vector_feature, input_length=sms_length))
model.add(Bidirectional(LSTM(100)))  # the Bidirectional wrapper builds two LSTMs (forward and backward)
model.add(Dropout(0.3))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss = 'binary_crossentropy', optimizer = 'adam', metrics = ['accuracy'])
print(model.summary())

len(embedded_docs), y.shape

x_final = np.array(embedded_docs)
y_final = np.array(y)

"""# Spliting the data into test and train part"""

X_train, X_test, Y_train, Y_test = train_test_split(x_final, y_final, test_size = 0.35, random_state = 42)

"""# **Training the model**"""
Example #6
x_predict2 : [[1.65016254 1.34013605 1.13340003]]
'''

# Start from the model
# The predict inputs must be scaled as well

# Build the LSTM model
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.layers import Dense, LSTM, Input

x = x.reshape(14, 3, 1)
x_predict1 = x_predict1.reshape(1, 3, 1)
x_predict2 = x_predict2.reshape(1, 3, 1)

model = Sequential()
model.add(LSTM(10, activation='relu', input_shape=(3, 1)))
model.add(Dense(20))
model.add(Dense(10))
model.add(Dense(1))

model.summary()

# Compile and train

# from sklearn.model_selection import train_test_split
# x_train, x_test, y_train, y_test = train_test_split(
#     x, y, shuffle=False, train_size=0.8
# )

from tensorflow.keras.callbacks import EarlyStopping, TensorBoard
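# The rest of this example is cut off; a sketch of the compile/fit step the imported callbacks are
# usually wired into, assuming the x/y arrays prepared above (patience, epochs and log_dir are illustrative).
model.compile(loss='mse', optimizer='adam', metrics=['mae'])
es = EarlyStopping(monitor='loss', patience=20, mode='min')
tb = TensorBoard(log_dir='./logs')
model.fit(x, y, epochs=500, batch_size=1, callbacks=[es, tb])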
Example #7
train = reframed_train.values
test = reframed_test.values
valid = reframed_valid.values
# split into input and outputs
train_X, train_y = train[:, :-1], train[:, -1]
test_X, test_y = test[:, :-1], test[:, -1]
valid_X, valid_y = valid[:, :-1], valid[:, -1]
# reshape input to be 3D [samples, timesteps, features]
train_X = train_X.reshape((train_X.shape[0], 1, train_X.shape[1]))
test_X = test_X.reshape((test_X.shape[0], 1, test_X.shape[1]))
valid_X = valid_X.reshape((valid_X.shape[0], 1, valid_X.shape[1]))
print(train_X.shape, train_y.shape, test_X.shape, test_y.shape, valid_X.shape, valid_y.shape)


model = Sequential()
model.add(LSTM(50, input_shape=(train_X.shape[1], train_X.shape[2])))
model.add(Dense(25))
model.add(Dense(1))
model.compile(loss='mae', optimizer='adam')
# fit network
history = model.fit(train_X, train_y, epochs=100, batch_size=50, validation_data=(valid_X, valid_y), verbose=2,
                    shuffle=False)
# plot history
plt.plot(history.history['loss'], label='train')
plt.plot(history.history['val_loss'], label='valid')
plt.legend()
plt.show()

# make a prediction
yhat = model.predict(test_X)
test_X = test_X.reshape((test_X.shape[0], test_X.shape[2]))
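# The inverse-scaling step is not shown; a sketch of how it is typically done, assuming the
# MinMaxScaler used to build the reframed_* frames is available as `scaler` and the target was
# the first scaled column (both are assumptions, not visible in this excerpt).
from numpy import concatenate
from math import sqrt
from sklearn.metrics import mean_squared_error

inv_yhat = scaler.inverse_transform(concatenate((yhat, test_X[:, 1:]), axis=1))[:, 0]
inv_y = scaler.inverse_transform(concatenate((test_y.reshape(-1, 1), test_X[:, 1:]), axis=1))[:, 0]
print('Test RMSE: %.3f' % sqrt(mean_squared_error(inv_y, inv_yhat)))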
Example #8
np.random.shuffle(y_train)
tf.random.set_seed(7)

x_train, y_train = np.array(x_train), np.array(y_train)

x_train = np.reshape(x_train, (x_train.shape[0], 60, 1))

for i in range(60, len(test_set)):
    x_test.append(test_set[i-60:i, 0])
    y_test.append(test_set[i, 0])

x_test, y_test = np.array(x_test), np.array(y_test)
x_test = np.reshape(x_test, (x_test.shape[0], 60, 1))

model = tf.keras.Sequential([
    LSTM(80, return_sequences=True),
    Dropout(0.2),
    LSTM(100),
    Dropout(0.2),
    Dense(1)
])

model.compile(optimizer=tf.keras.optimizers.Adam(0.001),
                loss='mean_squared_error')

checkpoint_save_path = './checkpoint/LSTM_stock.ckpt'

if os.path.exists(checkpoint_save_path + '.index'):
    print('----------------- load the model -------------------')
    model.load_weights(checkpoint_save_path)
plt.show()

# Reconstructing the first window to see how well the model performs.
plt.plot(x_train[0], 'red')
plt.plot(x_train_pred[0], 'blue')
plt.show()


# Plotting the comparison of the dataset with the anomalies.
anomalies_index = get_anomaly_indices(threshold=0.245, train_loss=train_mae_loss)
compare(cpu_utilization, anomalies_index)


# LSTM based auto-encoder model.
model2 = Sequential()
model2.add(LSTM(128, activation='relu', input_shape=(x_train.shape[1], x_train.shape[2]), return_sequences=True))
model2.add(LSTM(64, activation='relu', return_sequences=False))
model2.add(RepeatVector(x_train.shape[1]))
model2.add(LSTM(64, activation='relu', return_sequences=True))
model2.add(LSTM(128, activation='relu', return_sequences=True))
model2.add(TimeDistributed(Dense(x_train.shape[2])))
model2.compile(optimizer='adam', loss='mse')
model2.summary()


history2 = model2.fit(x_train, x_train, epochs=50, batch_size=128, validation_split=0.1).history


# Plotting the training and validation losses.
fig, ax = plt.subplots(figsize=(14, 6), dpi=80)
ax.plot(history2['loss'], 'b', label='Train', linewidth=2)
Example #10
from tensorflow.keras.layers import Dense, LSTM, Dropout, Activation
import os

sequence_length = 100

FILE_PATH = "holmes.txt"
BASENAME = os.path.basename(FILE_PATH)

seed = "the hound of the baskervilles"

char2int = pickle.load(open(f"{BASENAME}-char2int.pickle", "rb"))
int2char = pickle.load(open(f"{BASENAME}-int2char.pickle", "rb"))
vocab_size = len(char2int)

model = Sequential([
    LSTM(256, input_shape=(sequence_length, vocab_size),
         return_sequences=True),
    Dropout(0.3),
    LSTM(256),
    Dense(vocab_size, activation="softmax"),
])

# load the optimal weights
model.load_weights(f"results/{BASENAME}-{sequence_length}.h5")

s = seed
n_chars = 1000
# generate n_chars characters
generated = ""
for i in tqdm.tqdm(range(n_chars), "Generating text"):
    # make the input sequence
    X = np.zeros((1, sequence_length, vocab_size))
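    # The loop body is cut off here in the excerpt; a sketch of the usual completion: one-hot
    # encode the current seed, predict the next character, append it, and slide the window.
    for t, char in enumerate(seed):
        X[0, (sequence_length - len(seed)) + t, char2int[char]] = 1
    predicted = model.predict(X, verbose=0)[0]    # probability distribution over the vocabulary
    next_char = int2char[np.argmax(predicted)]    # greedy decoding
    generated += next_char
    seed = (seed + next_char)[-sequence_length:]  # keep only the last `sequence_length` characters

print("Seed:", s)
print("Generated:", generated)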
Example #11
y = y.reshape(y.shape[0],1)

from sklearn.model_selection import train_test_split
x_train, x_test , y_train  , y_test = train_test_split(x , y , train_size=0.8, random_state=1)

# 2. Model
from tensorflow.keras.models import Sequential , Model
from tensorflow.keras.layers import Dense, LSTM
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import MaxPooling2D
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Dropout
from keras.layers import Input, concatenate

input1 = Input(shape=(5,2))
LSTM1_1 = LSTM(50,activation='relu',name='LSTM1_1')(input1)
dense1_1 = Dense(60,activation='relu',name='dense1_1')(LSTM1_1)
dense1_2 = Dense(70,activation='relu',name='dense1_2')(dense1_1)
dense1_3 = Dense(80,activation='relu',name='dense1_3')(dense1_2)
dense1_4 = Dense(70,activation='relu',name='dense1_4')(dense1_3)
dense1_5 = Dense(60,activation='relu',name='dense1_5')(dense1_4)
dense1_6 = Dense(50,activation='relu',name='dense1_6')(dense1_5)
dense1_7 = Dense(30,activation='relu',name='dense1_7')(dense1_6)
dense1_8 = Dense(20,activation='relu',name='dense1_8')(dense1_7)
dense1_9 = Dense(10,activation='relu',name='dense1_9')(dense1_8)
output1 = Dense(1,name="output2")(dense1_9)

model = Model(inputs=(input1),outputs=output1)
model.summary()

ypred = model.predict(x_test_seq_pad)
ypredout = np.argmax(ypred, axis=1)
testscores = metrics.accuracy_score(y_test, ypredout)
confusion = metrics.confusion_matrix(y_test, ypredout)
print("accuracy:%.2f%%" % (testscores * 100))
print(metrics.classification_report(y_test, ypredout, digits=2))
print(confusion)

# Bidirectional LSTM
no_of_epochs = 10
model = Sequential()
model.add(embedding_layer)
model.add(
    Bidirectional(
        LSTM(output_size,
             activation=rnn_activation,
             recurrent_activation=recurrent_activation)))
model.add(Dropout(0.25))
model.add(Dense(2))
model.add(Activation('sigmoid'))
model.summary()

model.compile(loss=loss, optimizer=optimizer, metrics=['accuracy'])
print('Bidirectional LSTM')
model.fit(x_seq_pad,
          y,
          batch_size=batch_size,
          epochs=no_of_epochs,
          validation_split=validation_split,
          shuffle=shuffle)
Example #13
# Re-shaping data
x_train_reshaped = x_train_processed.reshape(len(x_train_processed), 1755, 1)

# Save copy of downsampled
np.savetxt(
    '/mnt/ml4cvd/projects/skhurshid/bmi707/x_train_downsampled_3510.tsv',
    x_train_reshaped,
    fmt='%.1f')
np.savetxt('/mnt/ml4cvd/projects/skhurshid/bmi707/y_train.tsv',
           y_train,
           fmt='%.1f')

timesteps = 1755
data_dim = 1

# Model
model = Sequential()
model.add(LSTM(
    16, return_sequences=False,
    input_shape=(timesteps,
                 data_dim)))  # returns only the final hidden state (16-dim), since return_sequences=False
model.add(Dense(1, activation='sigmoid'))

# Compile
opt = optimizers.SGD(lr=0.01)
model.compile(loss='binary_crossentropy', optimizer=opt, metrics=['accuracy'])

# Fit
model.fit(x_train_reshaped, y_train, epochs=100, verbose=1)
    QK = Activation("softmax",name=name)(QK)
    MV = Multiply()([V, QK])
    return(MV)



x_ly=tf.convert_to_tensor(train_x)
#X_train_au1=X_train_au[:484]
#print("lyric输入shape: ",x_ly.shape,"\n audio输入shape: ",
#      X_train_au.shape,"\n修改后的shape:",X_train_au1.shape)
train_x=embedding_matrix
train_y=tf.convert_to_tensor([[float(i)] for i in y_train])
#print(y_train)
ly_input = Input((100,32), name='ly_input' )
au_input = Input((1,54), name='au_input')
lstm_out_ly = LSTM(32)(ly_input)
lstm_out_au = LSTM(32)(au_input)

#x = keras.layers.concatenate([lstm_out, lstm_out1])
x = keras.layers.concatenate([lstm_out_ly, lstm_out_au])
print(x.shape)
x = Att(64,x,"attention_vec")

x = Dense(32, activation='relu')(x)
main_output = Dense(1, activation='sigmoid', name='main_output')(x)
 
model = Model(inputs=[ly_input, au_input], outputs=[main_output])
#model.compile(optimizer='rmsprop', loss='binary_crossentropy', loss_weights=[1., 0.2])
#model.compile(optimizer='rmsprop', 
#            loss={'main_output': 'binary_crossentropy'},
#            loss_weights={'main_output': 1.})
            # and will not include the start character.

            decoder_target_data[i, t - 1, target_token_index[word]] = 1

#Model Build  :

latent_dim = 128

from tensorflow.keras.layers import Dense, LSTM, Embedding, Input
from tensorflow.keras.models import Model
from tensorflow.keras.utils import plot_model
from tensorflow.keras.callbacks import EarlyStopping

encoder_inputs = Input(shape=(None, ))
encoder_embed_layer = Embedding(num_encoder_tokens, latent_dim)(encoder_inputs)
encoder_LSTM = LSTM(latent_dim, return_state=True)
encoder_outputs, state_h, state_c = encoder_LSTM(encoder_embed_layer)

encoder_states = [state_h, state_c]

# Set up the decoder, using `encoder_states` as initial state.

decoder_inputs = Input(shape=(None, ))

decoder_embed_layer = Embedding(num_decoder_tokens, latent_dim)
final_decoder_embed_layer = decoder_embed_layer(decoder_inputs)

decoder_LSTM = LSTM(latent_dim, return_sequences=True, return_state=True)

decoder_outputs, _, _ = decoder_LSTM(final_decoder_embed_layer,
                                     initial_state=encoder_states)
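# The example is truncated here; a sketch of how such a seq2seq trainer is usually finished:
# a softmax projection over the decoder vocabulary, then the combined training model
# (the optimizer/loss choices are illustrative, not necessarily the original ones).
decoder_dense = Dense(num_decoder_tokens, activation='softmax')
decoder_outputs = decoder_dense(decoder_outputs)

model = Model([encoder_inputs, decoder_inputs], decoder_outputs)
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])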
Example #16
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, LSTM

mnist = tf.keras.datasets.mnist  # mnist is a dataset of 28x28 images of handwritten digits and their labels
(x_train, y_train), (x_test, y_test) = mnist.load_data(
)  # unpacks images to x_train/x_test and labels to y_train/y_test

x_train = x_train / 255.0
x_test = x_test / 255.0

model = Sequential()
model.add(
    LSTM(128,
         input_shape=(x_train.shape[1:]),
         activation='relu',
         return_sequences=True))
model.add(Dropout(0.2))

model.add(LSTM(128, activation='relu'))
model.add(Dropout(0.1))

model.add(Dense(32, activation='relu'))
model.add(Dropout(0.2))

model.add(Dense(10, activation='softmax'))

opt = tf.keras.optimizers.Adam(lr=0.001, decay=1e-6)

model.compile(
    loss='sparse_categorical_crossentropy',
Example #17
y_train.shape

x_test.shape
y_test.shape


x_train=x_train.reshape(x_train.shape[0],x_train.shape[1],1)
x_test=x_test.reshape(x_test.shape[0],x_test.shape[1],1)

from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import LSTM


model=Sequential()
model.add(LSTM(50,return_sequences=True,input_shape=(100,1)))
model.add(LSTM(50,return_sequences=True))
model.add(LSTM(50))
model.add(Dense(1))
model.compile(loss="mean_squared_error",optimizer='adam')


model.summary()


model.fit(x_train,y_train,validation_data=(x_test,y_test),epochs=100,batch_size=64,verbose=1)


###prediction with the models
train_prediction=model.predict(x_train)
test_prediction=model.predict(x_test)
target_data = target_data[p]

pd.set_option('display.max_colwidth', None)  # -1 is deprecated in recent pandas; None disables truncation
BUFFER_SIZE = len(input_data)
BATCH_SIZE = 128
embedding_dim = 300
units = 128
vocab_in_size = len(input_lang.word2idx)
vocab_out_size = len(target_lang.word2idx)

# Create the Encoder layers first.
encoder_inputs = Input(shape=(len_input,))
encoder_emb = Embedding(input_dim=vocab_in_size, output_dim=embedding_dim)

# Create the Bidirectional LSTM
encoder_lstm = Bidirectional(LSTM(units=units, return_sequences=True, return_state=True))
encoder_out, fstate_h, fstate_c, bstate_h, bstate_c = encoder_lstm(encoder_emb(encoder_inputs))
state_h = Concatenate()([fstate_h,bstate_h])
state_c = Concatenate()([fstate_c, bstate_c])  # concatenate the forward and backward cell states
encoder_states = [state_h, state_c]


# Now create the Decoder layers.
decoder_inputs = Input(shape=(None,))
decoder_emb = Embedding(input_dim=vocab_out_size, output_dim=embedding_dim)
decoder_lstm = LSTM(units=units*2, return_sequences=True, return_state=True)
decoder_lstm_out, _, _ = decoder_lstm(decoder_emb(decoder_inputs), initial_state=encoder_states)
# Two dense layers added to this model to improve inference capabilities.
decoder_d1 = Dense(units, activation="relu")
decoder_d2 = Dense(vocab_out_size, activation="softmax")
decoder_out = decoder_d2(Dropout(rate=.2)(decoder_d1(Dropout(rate=.2)(decoder_lstm_out))))
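# Cut off in the excerpt; a sketch of assembling and compiling the full training model from the
# layers above (the optimizer and loss here are illustrative choices, not necessarily the original ones).
model = Model([encoder_inputs, decoder_inputs], decoder_out)
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
model.summary()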
Example #19
##############################
######
#
######
#    #
######
##############################
from tensorflow import keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, LSTM, Dropout

lr = 1e-3

model = Sequential(name='red_recurrente_LSTM')
model.add(LSTM(16, input_shape=(lag, 1), name='LSTM'))
model.add(Dense(1, name='Dense'))
model.compile(loss=keras.losses.MeanSquaredError(),
              optimizer=keras.optimizers.Adam(lr))

model.summary()

##############################
######
#
#
#
#
##############################
epochs = 200
batch_size = 10
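# The fit call these hyperparameters feed into is cut off; a minimal sketch, assuming X_train/y_train
# windows of shape (n_samples, lag, 1) prepared earlier (hypothetical names, not from the excerpt).
history = model.fit(X_train, y_train,
                    epochs=epochs,
                    batch_size=batch_size,
                    validation_split=0.2,
                    verbose=1)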
Example #20
encoded_answer = to_categorical(tf.squeeze(answer, axis=1), num_classes=vocab_size)

###################################
#       Model                     #
###################################
context_model = Sequential()
context_model.add(Embedding(vocab_size, EMBED_HIDDEN_SIZE, input_length=MAX_CONTEXT))
context_model.add(Dropout(0.3))
# summarize the model
print(context_model.summary())

# generate embeddings for the question and repeat them so they can be merged with the story representation
question_model = Sequential()
question_model.add(Embedding(vocab_size, EMBED_HIDDEN_SIZE, input_length=MAX_QUESTION))
question_model.add(Dropout(0.3))
question_model.add(LSTM(300, return_sequences=False))
question_model.add(RepeatVector(MAX_CONTEXT))

print(question_model.summary())

# merge the two
merged_model = add([context_model.output, question_model.output])

model_combined = Sequential()
model_combined.add(LSTM(300, return_sequences=False))
model_combined.add(Dropout(0.3))
model_combined.add(Dense(vocab_size, activation="softmax"))

# combine models
final_model = Model([context_model.input, question_model.input], model_combined(merged_model))
print(final_model.summary())
Example #21
# Print the unique values of y
y_bunpo = np.unique(y_train)
print(y_bunpo)
'''

############################ Preprocessing ################################
from tensorflow.keras.preprocessing.sequence import pad_sequences
x_train = pad_sequences(x_train, maxlen=100)
x_test = pad_sequences(x_test, maxlen=100)

from tensorflow.keras.models import Sequential, load_model
from tensorflow.keras.layers import Dense, LSTM, Embedding, Flatten, Conv1D

model = Sequential()
model.add(Embedding(1000, 400, input_length=100))
model.add(LSTM(200))
model.add(Dense(1,activation='sigmoid'))

model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['acc'])
from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau, ModelCheckpoint
es = EarlyStopping(monitor='val_loss', patience=30,mode='min')
lr = ReduceLROnPlateau(monitor='val_loss', patience=10, mode='min')
file_path = 'c:/data/modelcheckpoint/checkpoint_85.hdf5'
mc = ModelCheckpoint(file_path,monitor='val_acc', save_best_only=True, mode='max',verbose=1)
model.fit(x_train,y_train, batch_size=16, epochs=200, validation_split=0.2, callbacks=[es,lr,mc])


loss, acc = model.evaluate(x_test,y_test,batch_size=16)
print("Loss : ", loss)
print("Accuracy : ", acc)
model2 = load_model('c:/data/modelcheckpoint/checkpoint_85.hdf5')
Example #22
# Build the dataset
for t in range(len(series) - T):
    x, y = series[t:t + T], series[t + T]
    X.append(x)
    Y.append(y)

X = np.array(X).reshape(-1, T, 1)  # Now the data should be N x T x D
Y = np.array(Y)
N = len(X)
print("X.shape", X.shape, "Y.shape",
      Y.shape)  # X.shape (1249, 10, 1) Y.shape (1249,)

# autoregressive RNN model
i = Input(shape=(T, 1))
x = LSTM(units=5)(i)
x = Dense(units=1)(x)
model = Model(i, x)
model.compile(loss='mse', optimizer=Adam(lr=0.1))

# train the RNN
r = model.fit(
    X[:-N // 2],
    Y[:-N // 2],
    epochs=80,
    validation_data=(X[-N // 2:], Y[-N // 2:]),
)

# Plot loss per iteration
import matplotlib.pyplot as plt
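# The plotting code is cut off after the import; a sketch of the loss curves the comment above refers to.
plt.plot(r.history['loss'], label='loss')
plt.plot(r.history['val_loss'], label='val_loss')
plt.legend()
plt.show()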
Example #23
x_val = data[p1:p2]
y_val = labels[p1:p2]
x_test = data[p2:]
y_test = labels[p2:]
print('train docs: ' + str(len(x_train)))
print('val docs: ' + str(len(x_val)))
print('test docs: ' + str(len(x_test)))

print('(5) training model...')

model = Sequential()
model.add(
    Embedding(len(word_index) + 1,
              EMBEDDING_DIM,
              input_length=MAX_SEQUENCE_LENGTH))
model.add(LSTM(200, dropout=0.2, recurrent_dropout=0.2))
model.add(Dropout(0.2))
model.add(Dense(labels.shape[1], activation='softmax'))
model.summary()

model.compile(loss='categorical_crossentropy',
              optimizer='rmsprop',
              metrics=['acc'])
print(model.metrics_names)
model.fit(x_train,
          y_train,
          validation_data=(x_val, y_val),
          epochs=2,
          batch_size=128)
# model.save('lstm.h5')
Example #24
(x_train,
 y_train), (x_test,
            y_test) = keras.datasets.imdb.load_data(num_words=num_words)

x_train = pad_sequences(x_train, maxlen=maxlen)
x_test = pad_sequences(x_test, maxlen=maxlen)

encoder_inputs = Input(shape=(maxlen, ), name='Encoder-Input')
emb_layer = Embedding(num_words,
                      embed_dim,
                      input_length=maxlen,
                      name='Word-Embedding',
                      mask_zero=False)
x = emb_layer(encoder_inputs)
state_h = Bidirectional(LSTM(128, activation='relu',
                             name='Encoder-Last-LSTM'))(x)
encoder_model = Model(inputs=encoder_inputs,
                      outputs=state_h,
                      name='Encoder-Model')
seq2seq_encoder_out = encoder_model(encoder_inputs)

decoded = RepeatVector(maxlen)(seq2seq_encoder_out)
decoder_lstm = Bidirectional(
    LSTM(128, return_sequences=True, name='Decoder-LSTM-before'))
decoder_lstm_output = decoder_lstm(decoded)
decoder_dense = Dense(num_words,
                      activation='softmax',
                      name='Final-Output-Dense-before')
decoder_outputs = decoder_dense(decoder_lstm_output)

seq2seq_Model = Model(encoder_inputs, decoder_outputs)
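# Not in the excerpt: a sketch of compiling and training the sequence autoencoder defined above.
# The sparse loss assumes the integer-encoded x_train sequences are reused as reconstruction targets
# (an assumption; the original training call is not shown).
seq2seq_Model.compile(optimizer='adam', loss='sparse_categorical_crossentropy')
seq2seq_Model.summary()
# seq2seq_Model.fit(x_train, x_train, batch_size=64, epochs=5, validation_data=(x_test, x_test))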
Example #25
embedding_matrix = np.load('embedding_matrix.npy')
SC = StandardScaler()
y_train = SC.fit_transform(y_train.values.reshape(-1, 1))
y_val = SC.transform(y_val.values.reshape(-1, 1))  # transform only, so validation statistics do not leak into the scaler
y_test = SC.transform(y_test.values.reshape(-1, 1))  # same for the test labels
input_1 = Input(shape=(train_text1_seq.shape[1], ))
input_2 = Input(shape=(train_text2_seq.shape[1], ))
embed = Embedding(input_dim=vocab_size,
                  output_dim=300,
                  weights=[embedding_matrix],
                  input_length=train_text1_seq.shape[1],
                  trainable=False)
lstm_1 = embed(input_1)
lstm_2 = embed(input_2)
lstm = Bidirectional(
    LSTM(50, return_sequences=True, activation='relu', dropout=0.2))
vector_1 = lstm(lstm_1)
vector_2 = lstm(lstm_2)
vector_1 = Flatten()(vector_1)
vector_2 = Flatten()(vector_2)
conc = concatenate([vector_1, vector_2])
out = Dense(1)(conc)
model = Model([input_1, input_2], out)
callback = [EarlyStopping(patience=8)]
model.compile(optimizer=Adam(0.00001), loss='mse', metrics=['mae'])
model.load_weights('vcp8.h5')
history = model.fit([train_text1_seq, train_text2_seq],
                    y_train,  # already a scaled 2-D array after the StandardScaler step above
                    epochs=100,
                    batch_size=32,
                    validation_data=([val_text1_seq, val_text2_seq],
Example #26
        return x, y
    
    def __len__(self):
        return self.n_batches
    
    def on_epoch_end(self):
        self._refresh_sample_keys()

data = Data()

backbone = keras_model(include_top=False, pooling='avg', weights='imagenet', input_shape=(224, 224, 3))
backbone.trainable = False

rnn_model = Sequential()
rnn_model.add(TimeDistributed(backbone))
rnn_model.add(LSTM(512, input_shape=(25, 2048), return_sequences=True))
rnn_model.add(Dropout(0.5))
rnn_model.add(LSTM(512, input_shape=(25, 2048), return_sequences=True))
rnn_model.add(Dropout(0.5))
rnn_model.add(TimeDistributed(Dense(5, activation="softmax")))

print("START LSTM TRAINING")

rnn_model.layers[0].trainable = False
rnn_model.compile(loss='categorical_crossentropy',
                  optimizer="adam",
                  metrics=['accuracy'])

sequence = SliceDataGenerator(data, "KAG", batch_size=2)
rnn_history = rnn_model.fit(sequence, epochs=300)
Example #27
def lstm(MODEL_NAME, train_data, test_data, MAX_LEN=32):
    if MODEL_NAME == 'summalstm':
        print("summarizing data that are longer than MAX_LEN " + str(MAX_LEN))
        log.info("summarizing data that are longer than MAX_LEN " +
                 str(MAX_LEN))
        if config.data_name == 'nsmc':
            summarizer = KeywordSummarizer(tokenize=komoran_tokenizer,
                                           min_count=1,
                                           min_cooccurrence=1)
            print("summarizing train data")
            train_data = kor_summa(summarizer, train_data, MAX_LEN)
            print("summarizing test data")
            test_data = kor_summa(summarizer, test_data, MAX_LEN)

        elif config.data_name == 'imdb':
            print("not implemented yet...")

    print("tokenizing...")
    log.info("tokenizing...")
    okt = Okt()
    print("tokenizing train_data")
    X_train = tokenize(train_data, okt)
    print("tokenizing test_data")
    X_test = tokenize(test_data, okt)
    print()
    log.info("")

    # encode
    print("encoding and preprocessing...")
    log.info("encoding and preprocessing...")
    tokenizer = Tokenizer()
    tokenizer.fit_on_texts(X_train)

    threshold = 3
    total_cnt = len(tokenizer.word_index)
    rare_cnt = 0
    total_freq = 0
    rare_freq = 0

    for key, value in tokenizer.word_counts.items():
        total_freq = total_freq + value

        if (value < threshold):
            rare_cnt = rare_cnt + 1
            rare_freq = rare_freq + value

    # delete rare tokens
    vocab_size = total_cnt - rare_cnt + 1

    tokenizer = Tokenizer(vocab_size)
    tokenizer.fit_on_texts(X_train)
    X_train = tokenizer.texts_to_sequences(X_train)
    X_test = tokenizer.texts_to_sequences(X_test)
    y_train = np.array(train_data['label'])
    y_test = np.array(test_data['label'])

    drop_train = [
        index for index, sentence in enumerate(X_train) if len(sentence) < 1
    ]
    drop_test = [
        index for index, sentence in enumerate(X_test) if len(sentence) < 1
    ]

    # delete empty samples
    X_train = np.delete(X_train, drop_train, axis=0)
    y_train = np.delete(y_train, drop_train, axis=0)
    X_test = np.delete(X_test, drop_test, axis=0)
    y_test = np.delete(y_test, drop_test, axis=0)

    # 32 / 64 / 128
    max_len = MAX_LEN

    # padding
    X_train = pad_sequences(X_train, maxlen=max_len)
    X_test = pad_sequences(X_test, maxlen=max_len)
    print()

    print("reached checkpoint!")
    log.info("reached checkpoint!")

    model = Sequential()
    model.add(Embedding(vocab_size, 100))
    model.add(LSTM(128))
    model.add(Dense(1, activation='sigmoid'))
    es = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=4)
    mc = ModelCheckpoint('best_model.h5',
                         monitor='val_acc',
                         mode='max',
                         verbose=1,
                         save_best_only=True)
    model.compile(optimizer='rmsprop',
                  loss='binary_crossentropy',
                  metrics=['acc'])
    history = model.fit(X_train,
                        y_train,
                        epochs=15,
                        callbacks=[es, mc],
                        batch_size=60,
                        validation_split=0.1)
    loaded_model = load_model('best_model.h5')

    print("acc : %.4f" % (loaded_model.evaluate(X_test, y_test)[1]))
    log.info("acc : %.4f" % (loaded_model.evaluate(X_test, y_test)[1]))
Example #28
def make_model(file_name="TD20200309210544.json",
               column_review="reviewText",
               column_rating="overall",
               json_balanced=True,
               have_corpus=True,
               size=10000):

    # Making a json file with balanced ratings
    if json_balanced == False:
        make_balance_json(r'static/DBAlpha/TrainingDB/Files/' + file_name,
                          column_review, column_rating,
                          "main/files/uniform_json.json", size / 5)
    dataset = read_json('main/files/uniform_json.json', lines=True)
    dataset = dataset[:size]

    # Making corpus, in case the corpus doesn't exist
    if have_corpus == False:
        corpus = basic.preprocess_lemm_dataset(dataset, 'review')
        process_corpus.write_corpus(corpus)

    # If corpus exists, read it directly
    else:
        corpus = []
        corpus = process_corpus.read_corpus()
        corpus = corpus[:size]

    # Getting the ratings
    y = dataset.iloc[:size, 0]

    # Maximum words to consider
    TRAINING_VOCAB = 5000

    # Tokenizing the words up to the maximum vocabulary
    tokenizer = Tokenizer(num_words=TRAINING_VOCAB,
                          lower=True,
                          char_level=False)
    # Fitting the corpus to tokenizer
    tokenizer.fit_on_texts(corpus)
    training_sequences = tokenizer.texts_to_sequences(corpus)
    # Getting the encoding dictionary
    vocab_to_int = tokenizer.word_index

    sequence_length = 150

    # Padding to maximum sequence length
    features = pad_sequences(training_sequences, maxlen=sequence_length)
    """
    EMBEDDING_DIM = 300
    # Loading google's words to vect embedding
    print("\nLoading the Google's word2vec \nPlease Wait...")
    word2vec_path = 'resources/GoogleNews-vectors-negative300.bin'
    word2vec = models.KeyedVectors.load_word2vec_format(word2vec_path, binary=True)
    
    train_embedding_weights = np.zeros((len(vocab_to_int), EMBEDDING_DIM))
    for word,index in vocab_to_int.items():
        if word in word2vec:
            train_embedding_weights[index,:] = word2vec[word]  
        else:
            np.random.rand(EMBEDDING_DIM)
    print(train_embedding_weights.shape)
    """

    # Variables for RNN LSTM
    vocab_size = len(vocab_to_int)
    embedding_dim = 512

    # Training parameters
    batch_size = int(size // 100)
    num_epochs = 30

    # Encoding y data into different categorical columns
    labelencoder_y = LabelEncoder()
    y = labelencoder_y.fit_transform(y)
    y = y.reshape(len(y), 1)
    onehotencoder = OneHotEncoder()
    y = onehotencoder.fit_transform(y).toarray()

    # Splitting the dataset into the Training set and Test set
    X_train, X_test, y_train, y_test = train_test_split(features,
                                                        y,
                                                        test_size=0.20,
                                                        random_state=0)

    # Initialising the RNN
    model = Sequential()

    # Adding Layers to RNN
    #model.add(Embedding(vocab_size, embedding_dim, weights = [train_embedding_weights],input_length=sequence_length))
    if size > 2000:
        model.add(
            Embedding(TRAINING_VOCAB,
                      embedding_dim,
                      input_length=sequence_length))
    else:
        model.add(
            Embedding(TRAINING_VOCAB, size // 10, input_length=sequence_length))  # output_dim must be an integer

    model.add(LSTM(100, return_sequences=True))
    model.add(LSTM(100))
    model.add(Dense(units=200, kernel_initializer='uniform',
                    activation='relu'))

    model.add(Dense(5, activation='sigmoid'))
    #rmsprop=optimizers.rmsprop(lr=0.01)
    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])

    # Fitting the RNN to the Training set
    model.fit(X_train, y_train, batch_size=batch_size, epochs=num_epochs)

    # Predicting the Test set results over trained model
    y_pred = model.predict(X_test)

    # Converting the probabilistic predictions into a one-hot format
    for i in range(len(y_pred)):
        ind_ = 0
        max_ = y_pred[i][0]
        for j in range(5):
            if y_pred[i][j] > max_:
                max_ = y_pred[i][j]
                ind_ = j
            y_pred[i][j] = 0
        y_pred[i][ind_] = 1

    # Inverse Transforming the categorical encodings on y_pred and y_test
    y_pred = onehotencoder.inverse_transform(y_pred)
    y_test = onehotencoder.inverse_transform(y_test)

    # Measuring the performance
    accuracy = accuracy_score(y_test,
                              y_pred,
                              normalize=True,
                              sample_weight=None)

    #
    file_name = re.sub(".json", "", file_name)
    with open(r'static/DBAlpha/TrainingDB/Models/TOKEN_' + file_name + ".pkl",
              'wb') as handle:
        pickle.dump(tokenizer, handle, protocol=pickle.HIGHEST_PROTOCOL)

    model.save(r'static/DBAlpha/TrainingDB/Models/' + file_name + '.h5')

    #     Returning the performance parameters
    return accuracy
x_train = []
y_train = []

for x in range(prediction_days, len(scaled_data)):
    x_train.append(scaled_data[x - prediction_days:x, 0])
    y_train.append(scaled_data[x, 0])

x_train, y_train = np.array(x_train), np.array(y_train)
x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], 1))

# Build the model
model = Sequential()

model.add(
    LSTM(units=50, return_sequences=True, input_shape=(x_train.shape[1], 1)))
model.add(Dropout(0.2))
model.add(LSTM(units=50, return_sequences=True))
model.add(Dropout(0.2))
model.add(LSTM(units=50))
model.add(Dropout(0.2))
model.add(Dense(units=1))  # Prediction of the next closing price

model.compile(optimizer='adam', loss='mean_squared_error')
model.fit(x_train, y_train, epochs=25, batch_size=32)

# Load the test data
test_start = dt.datetime(2020, 1, 1)
test_end = dt.datetime.now()

test_data = web.DataReader(company, 'yahoo', test_start, test_end)
cnn_model.add(Dropout(0.25))

cnn_model.add(Flatten())
cnn_model.add(Dense(128))
cnn_model.add(BatchNormalization())
cnn_model.add(LeakyReLU(alpha=.001))
cnn_model.add(Dropout(0.25))

# cnn_model.summary()

timesteps = 4

lstm_model = Sequential()
# lstm_model.add(Reshape((28, 512), input_shape=(28, 512)))
lstm_model.add(
    LSTM(256, input_shape=(28, 128), dropout=0.15, return_sequences=True))
lstm_model.add(BatchNormalization())
lstm_model.add(LSTM(512, dropout=0.15, return_sequences=False))
lstm_model.add(Dense(256))
lstm_model.add(BatchNormalization())
lstm_model.add(LeakyReLU(alpha=.001))
lstm_model.summary()

upsample_model = Sequential()
upsample_model.add(Reshape((16, 16, 1), input_shape=(1, 256)))
upsample_model.add(Conv2DTranspose(16, kernel_size=(4, 4), activation='relu'))
upsample_model.add(BatchNormalization())
upsample_model.add(Conv2DTranspose(32, kernel_size=(4, 4), activation='relu'))
upsample_model.add(BatchNormalization())
upsample_model.add(Conv2DTranspose(16, kernel_size=(4, 4), activation='relu'))
upsample_model.add(BatchNormalization())