Example #1
#y_train1 = np.array(Y)
y_test1 = np.array(C)

#y_train= to_categorical(y_train1)
y_test = to_categorical(y_test1)

# reshape input to be [samples, time steps, features]
#X_train = np.reshape(trainX, (trainX.shape[0], 1, trainX.shape[1]))
X_train = np.reshape(testT, (testT.shape[0], 1, testT.shape[1]))

batch_size = 32

# 1. define the network
model = Sequential()
model.add(SimpleRNN(4, input_dim=41))  # try using a GRU instead, for fun
model.add(Dropout(0.1))
model.add(Dense(5))
model.add(Activation('softmax'))
# try using different optimizers and different optimizer configs
model.compile(loss='categorical_crossentropy',optimizer='adam',metrics=['accuracy'])
checkpointer = callbacks.ModelCheckpoint(filepath="kddresults/lstm1layer/checkpoint-{epoch:02d}.hdf5", verbose=1, save_best_only=True, monitor='val_acc',mode='max')
csv_logger = CSVLogger('training_set_iranalysis.csv',separator=',', append=False)
model.fit(X_train, y_train, batch_size=batch_size, epochs=1000, validation_data=(X_test, y_test), callbacks=[checkpointer, csv_logger])
model.save("kddresults/lstm1layer/fullmodel/lstm1layer_model.hdf5")

loss, accuracy = model.evaluate(X_test, y_test)
print("\nLoss: %.2f, Accuracy: %.2f%%" % (loss, accuracy*100))
y_pred = model.predict_classes(X_test)
np.savetxt('kddresults/lstm1layer/lstm1predicted.txt', np.transpose([y_test1,y_pred]), fmt='%01d')
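# Hedged sketch of the imports this snippet assumes at the top (not shown
# in the original; trainX, testT and C come from the KDD preprocessing):
import numpy as np
from keras.models import Sequential
from keras.layers import SimpleRNN, Dropout, Dense, Activation
from keras.utils import to_categorical
from keras import callbacks
from keras.callbacks import CSVLogger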
Example #2
print("[DATA READ] {} samples, {} timesteps, {} features".format(
    n_conditions, n_timesteps, n_features))

### DATA TRANSFORMATION (2D to 3D tensor) ###
for idx, row in pd_data.iterrows():
    for timestep in range(n_timesteps):
        X[idx, timestep, :] = row[input_column_names]

### CREATE MODEL ###
model = Sequential()
model.add(
    SimpleRNN(
        n_hidden_nodes,
        return_sequences=False,
        activation=hidden_node_activation,
        recurrent_constraint=GeneRegulatoryConstraint(gene_regulatory_network),
        recurrent_dropout=recurrent_dropout,
        input_shape=(n_timesteps, n_features)))
print("[MODEL CREATED]")
print(model.summary())

### MODEL COMPILE
earlystop = EarlyStopping(monitor='loss',
                          min_delta=0.0001,
                          patience=4,
                          verbose=0,
                          mode='auto')
callbacks = [earlystop] if earlystop_on else []
rmsprop = optimization_method()
loss = 'mse'
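# The snippet stops before compiling; a minimal sketch of the likely
# continuation, assuming X/y are the tensors built above and the epoch
# count is a placeholder:
model.compile(optimizer=rmsprop, loss=loss)
model.fit(X, y, epochs=100, callbacks=callbacks, verbose=1)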
Example #3
    def fit_predict_model(self):
        embeddings_index = dict()
        with open(
                "E:\\Kaggle\\TMDB_Box_Office_Prediction\\glove\\glove.6B.50d.txt",
                mode="r",
                encoding="utf-8") as f:
            line = f.readline()
            while line:
                values = line.split()
                word = values[0]
                embeddings_index[word] = np.array(values[1:], dtype="float32")
                line = f.readline()

        embedding_matrix = np.zeros((len(self.__tok.word_index) + 1, 50))
        for word, i in self.__tok.word_index.items():
            embedding_vector = embeddings_index.get(word)
            if embedding_vector is not None:
                embedding_matrix[i] = embedding_vector

        self.__folds = KFold(n_splits=5, shuffle=True, random_state=7)
        self.__oof_preds = np.zeros(shape=self.__train_feature.shape[0])
        self.__sub_preds = np.zeros(shape=self.__test_feature.shape[0])

        for n_fold, (trn_idx, val_idx) in enumerate(
                self.__folds.split(self.__train_feature, self.__train_label)):
            trn_x, trn_y = self.__train_feature[trn_idx], self.__train_label[
                trn_idx]
            val_x, val_y = self.__train_feature[val_idx], self.__train_label[
                val_idx]

            net = Sequential()
            net.add(
                Embedding(input_dim=len(self.__tok.word_index) + 1,
                          output_dim=50,
                          weights=[embedding_matrix],
                          input_length=self.__mle,
                          mask_zero=False,
                          trainable=False))
            net.add(SimpleRNN(units=2))
            net.add(Dense(1, activation="linear"))
            net.compile(loss=rmsle, optimizer=Adam())
            net.fit(x=trn_x,
                    y=np.log1p(trn_y),
                    batch_size=32,
                    epochs=10,
                    verbose=2,
                    callbacks=[
                        EarlyStopping(monitor="val_loss",
                                      mode="min",
                                      patience=2)
                    ],
                    validation_data=(val_x, np.log1p(val_y)))  # validate on the same log1p scale as training

            pred_val = np.expm1(net.predict(val_x)).reshape(
                -1, )  # predictions have shape (n, 1); flatten to (n,)
            pred_test = np.expm1(net.predict(self.__test_feature)).reshape(
                -1, )

            self.__oof_preds[val_idx] = pred_val
            self.__sub_preds += pred_test / self.__folds.n_splits

            del trn_x, trn_y, val_x, val_y
            gc.collect()
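# The custom rmsle loss passed to net.compile() is not shown. Since the
# targets are log1p-transformed before fitting, plain RMSE on the
# transformed values is the usual definition (an assumption):
from keras import backend as K

def rmsle(y_true, y_pred):
    return K.sqrt(K.mean(K.square(y_pred - y_true)))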
Example #4
tokenizer.fit_on_texts(X_train_data)

# convert characters to integers
X_train_data = tokenizer.texts_to_sequences(X_train_data)
X_test_data = tokenizer.texts_to_sequences(X_test_data)


X_train = pad_sequences(X_train_data, maxlen=maxlen)
X_test = pad_sequences(X_test_data, maxlen=maxlen)

# LSTM and GRU exist to remember earlier context

model2 = Sequential()
model2.add(Embedding(max_words, 30))
model2.add(Dropout(0.5))
model2.add(SimpleRNN(128))
model2.add(Dropout(0.5))
model2.add(Dense(1, activation='sigmoid'))

model2.compile(loss='binary_crossentropy', optimizer='adam', metrics=['acc'])

model2.fit(X_train, Y_train, epochs=10, batch_size=50, validation_data=(X_test, Y_test))

testing = '배달도 빠르고 기사님이 친절해요'  # "Delivery is fast and the courier is kind"


pred = []

temp_X = okt.morphs(testing, stem=True)  # morphs = morphological analysis; stem=True merges inflected forms (만들고/만들어/만들/만듬)
# remove stopwords from the tokenized words
temp_X = [word for word in temp_X if not word in stop_words]
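# A plausible finish (assumed; the original cuts off here): encode the
# cleaned tokens, pad to maxlen, and score with the trained model.
encoded = tokenizer.texts_to_sequences([temp_X])
padded = pad_sequences(encoded, maxlen=maxlen)
score = float(model2.predict(padded))
print('positive' if score > 0.5 else 'negative', score)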
Example #5
(X_train, y_train), (X_test, y_test) = mnist.load_data()

# data pre-processing
X_train = X_train.reshape(-1, 28, 28) / 255.  # normalize
X_test = X_test.reshape(-1, 28, 28) / 255.  # normalize
y_train = np_utils.to_categorical(y_train, num_classes=10)
y_test = np_utils.to_categorical(y_test, num_classes=10)

# build RNN model
model = Sequential()

# RNN cell
model.add(SimpleRNN(
    # for batch_input_shape, if using tensorflow as the backend, we have to put None for the batch_size.
    # Otherwise, model.evaluate() will get error.
    batch_input_shape=(None, TIME_STEPS, INPUT_SIZE),  # Or: input_dim=INPUT_SIZE, input_length=TIME_STEPS,
    units=CELL_SIZE,
    unroll=True,
))

# output layer
model.add(Dense(OUTPUT_SIZE))
model.add(Activation('softmax'))

# optimizer
adam = Adam(LR)
model.compile(optimizer=adam,
              loss='categorical_crossentropy',
              metrics=['accuracy'])

# training
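# Constants the snippet assumes are defined before the model (values follow
# the usual 28x28 MNIST setup; CELL_SIZE and LR are guesses, the original
# does not show them):
TIME_STEPS = 28   # one timestep per image row
INPUT_SIZE = 28   # 28 pixels per row
OUTPUT_SIZE = 10  # ten digit classes
CELL_SIZE = 50
LR = 0.001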
Example #6
# downloads MNIST to '~/.keras/datasets/' on first call
# X_train shape (60000, 28, 28), y_train shape (60000,)
(X_train, y_train), (X_test, y_test) = mnist.load_data()

# data pre-processing
X_train = X_train.reshape(-1, 28, 28) / 255.      # normalize
X_test = X_test.reshape(-1, 28, 28) / 255.        # normalize
y_train = np_utils.to_categorical(y_train, num_classes=10)
y_test = np_utils.to_categorical(y_test, num_classes=10)

# build RNN model
model = Sequential()

# RNN cell
model.add(SimpleRNN(
    batch_input_shape=(None, TIME_STEPS, INPUT_SIZE),
    units=CELL_SIZE
))
# output layer
model.add(Dense(OUTPUT_SIZE))
model.add(Activation('softmax'))

# optimizer
adam = Adam(LR)
model.compile(optimizer=adam,
              loss='categorical_crossentropy',
              metrics=['accuracy'])

# training
for step in range(4001):
    # data shape = (batch_num, steps, inputs/outputs)
    X_batch = X_train[BATCH_INDEX: BATCH_SIZE+BATCH_INDEX, :, :]
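    # Hedged completion of the truncated loop (it appears to follow the
    # common Keras MNIST batch-training example; BATCH_SIZE/BATCH_INDEX
    # are assumed to be defined earlier, e.g. 50 and 0):
    Y_batch = y_train[BATCH_INDEX: BATCH_INDEX + BATCH_SIZE, :]
    cost = model.train_on_batch(X_batch, Y_batch)
    BATCH_INDEX += BATCH_SIZE
    BATCH_INDEX = 0 if BATCH_INDEX >= X_train.shape[0] else BATCH_INDEX
    if step % 500 == 0:
        cost, accuracy = model.evaluate(X_test, y_test, verbose=False)
        print('test cost:', cost, 'test accuracy:', accuracy)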
Example #7
            y_valid[i - 1200] = 1
    else:
        if i < 1200:
            x_train[i] = forgery[i // 2]
            y_train[i] = 0
        else:
            x_valid[i - 1200] = forgery[i // 2]
            y_valid[i - 1200] = 0
print(x_train)
print(x_valid)
print(y_train)
print(y_valid)

# Train Simple RNN model
model_RNN = Sequential()
model_RNN.add(SimpleRNN(100))
model_RNN.add(Dense(1, activation='sigmoid'))
# model_RNN.add(Dropout(rate=0.2))
model_RNN.compile(loss='binary_crossentropy',
                  optimizer='Adam',
                  metrics=['accuracy'])
# model_RNN.summary()

model_RNN.fit(x_train, y_train, validation_data=(x_valid, y_valid), epochs=20)

# Train LSTM model
model_LSTM = Sequential()
model_LSTM.add(LSTM(100))
model_LSTM.add(Dense(1, activation='sigmoid'))
# model_RNN.add(Dropout(rate=0.2))
model_LSTM.compile(loss='binary_crossentropy',
                   optimizer='Adam',
                   metrics=['accuracy'])  # completed to mirror the RNN compile above
Example #8
x_predict = array([50, 60, 70])  # (3, )

print('x.shape : ', x.shape)  # (13, 3)
print('y.shape : ', y.shape)  # (13, ) != (13, 1)
# (13,) is a vector; (13, 3) is a matrix

# x = x.reshape(13, 3, 1)
x = x.reshape(x.shape[0], x.shape[1],
              1)  # x.shape[0] = 13 / x.shape[1] = 3 / process the data one value at a time
print(x.shape)  # (13, 3, 1)      / reshape check: the product of all dims must stay the same

#2. Build the model
model = Sequential()
# model.add(LSTM(10, activation='relu', input_shape = (3, 1)))
model.add(SimpleRNN(300, input_length=3,
                    input_dim=1))  # input_length: number of time steps (columns)
model.add(Dense(101))
model.add(Dense(150))
model.add(Dense(200))
model.add(Dense(130))
model.add(Dense(100))
model.add(Dense(100))
model.add(Dense(90))
model.add(Dense(70))
model.add(Dense(51))  # 5
model.add(Dense(1))

model.summary()

# # EarlyStopping
# from keras.callbacks import EarlyStopping
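# A plausible finish (assumed; the original stops at the commented-out
# EarlyStopping import): compile, fit, and predict the next value.
model.compile(loss='mse', optimizer='adam')
model.fit(x, y, epochs=100, batch_size=1)
x_predict = x_predict.reshape(1, 3, 1)  # (samples, timesteps, features)
print(model.predict(x_predict))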
Example #9
# [sample, time steps, features]
# (batch_size, time_step_size, input_vec_size)
print(X_train.shape)
print(Y_train.shape)
print(Y_train.shape[1:])

# In[74]:

# build the model: a single SimpleRNN
print('Build Simple RNN model...')

rnn_model = Sequential()
rnn_model.add(
    SimpleRNN(256,
              input_shape=X_train.shape[1:],
              return_sequences=True,
              name='RNN'))
rnn_model.add(
    TimeDistributed(Dense(Y_train.shape[2], activation='softmax'),
                    name='softmax'))

rnn_model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
print(rnn_model.summary())

# In[62]:

rnn_model.fit(X_train, Y_train, epochs=30, batch_size=64)
# scores = rnn_model.evaluate(X_test, Y_test, verbose=0)
# print("Accuracy:%.2f%%" % (scores[1]*100))
Example #10
from IPython.display import SVG
from keras.utils.vis_utils import model_to_dot

# On Windows, add the following
import os
os.environ["PATH"] += os.pathsep + 'C:/Program Files (x86)/Graphviz2.38/bin/'

# Define the number of units, time steps, input dimensions, and the input shape
units = 10
time_steps = 5
input_dim = 15
input_shape = (time_steps, input_dim)

# Build the recurrent neural network model
x = Input(shape=input_shape, name='Input')
y = SimpleRNN(units=units, activation='sigmoid', name='SimpleRNN_1')(x)
model = Model(inputs=[x], outputs=[y])

# Display in SVG format (Figure 6-1-5)
SVG(model_to_dot(model, show_shapes=True).create(prog='dot', format='svg'))

# Build the RNN model, returning the full output sequence
y = SimpleRNN(units=units, activation='sigmoid', return_sequences=True, name='SimpleRNN_1')(x)
model = Model(inputs=[x], outputs=[y])

# Display the model in SVG format (Figure 6-1-6)
SVG(model_to_dot(model, show_shapes=True).create(prog='dot', format='svg'))

# Build the RNN model, also returning the internal state
y, state = SimpleRNN(units=units, activation='sigmoid', return_state=True, name='SimpleRNN_1')(x)
model = Model(inputs=[x], outputs=[y, state])
Example #11
def train(x_train, y_train, x_test, y_test, maxlen, max_token,
          embedding_matrix, embedding_dims, batch_size, epochs, logpath,
          modelpath, modelname, num_classes):
    embedding_layer = Embedding(max_token + 1,
                                embedding_dims,
                                input_length=maxlen,
                                weights=[embedding_matrix],
                                trainable=True)
    print(modelname + ' Build model...')
    model = Sequential()
    model.add(embedding_layer)
    model.add(SimpleRNN(128, activation="relu"))
    # model.add(LSTM(128))
    # model.add(Bidirectional(LSTM(200))) ### 输出维度64 GRU
    # model.add(Bidirectional(GRU(64)))
    model.add(Dropout(0.2))
    model.add(Dense(num_classes, activation='sigmoid'))
    # try using different optimizers and different optimizer configs
    model.compile('adam', 'categorical_crossentropy', metrics=['accuracy'])
    # lstm常选参数model.add(LSTM(128, dropout=0.2, recurrent_dropout=0.2))
    # a stateful LSTM model
    # lahead: the input sequence length that the LSTM is trained on
    # https://github.com/keras-team/keras/blob/master/examples/lstm_stateful.py
    # model = Sequential()
    # model.add(LSTM(20,input_shape=(lahead, 1),
    #               batch_size=batch_size,
    #               stateful=stateful))
    # model.add(Dense(num_classes))
    # model.compile(loss='mse', optimizer='adam')

    # model.load_weights('./model490w/TextBiLSTM.h5')

    early_stopping = EarlyStopping(monitor='val_acc', patience=3)
    checkpoint = keras.callbacks.ModelCheckpoint(filepath=modelpath +
                                                 modelname + '.h5',
                                                 monitor='val_acc',
                                                 verbose=1,
                                                 save_best_only=True,
                                                 save_weights_only=True,
                                                 mode='max',
                                                 period=1)
    tensorboard = keras.callbacks.TensorBoard(
        log_dir='./log/{}/'.format(modelname),
        histogram_freq=0,
        write_graph=True,
        write_images=True)
    callback_lists = [tensorboard, checkpoint, early_stopping]

    hist = model.fit(x_train,
                     to_categorical(y_train, num_classes),
                     validation_data=(x_test,
                                      to_categorical(y_test, num_classes)),
                     epochs=epochs,
                     batch_size=batch_size,
                     callbacks=callback_lists)
    max_val_acc = max(hist.history['val_acc'])
    os.rename(modelpath + modelname + '.h5',
              modelpath + modelname + '(' + str(max_val_acc) + ')' + '.h5')

    # print(hist.history)
## write loss and acc to the log file
    log_format = "%(asctime)s - %(message)s"
    logging.basicConfig(filename=logpath,
                        level=logging.DEBUG,
                        format=log_format)
    logging.warning(modelname)
    for i in range(len(hist.history["acc"])):
        strlog = str(i + 1) + " Epoch " + "-loss: " + str(
            hist.history["loss"][i]) + " -acc: " + str(
                hist.history["acc"][i]) + " -val_loss: " + str(
                    hist.history["val_loss"][i]) + " -val_acc: " + str(
                        hist.history["val_acc"][i])
        logging.warning(strlog)
Example #12
# Build the model. We use a single RNN with a fully connected layer
# to compute the most likely predicted output char
HIDDEN_SIZE = 128
BATCH_SIZE = 128
NUM_ITERATIONS = 25
NUM_EPOCHS_PER_ITERATION = 1
NUM_PREDS_PER_EPOCH = 100

model = Sequential()
# We want a single output for the whole sequence, so return_sequences=False.
# unroll=True is said to improve performance on the TensorFlow backend.
model.add(
    SimpleRNN(HIDDEN_SIZE,
              return_sequences=False,
              input_shape=(SEQLEN, nb_chars),
              unroll=True))
# The RNN acts as the hidden layer; its output must go through a Dense layer so the neuron count matches the vocabulary.
model.add(Dense(nb_chars))
model.add(Activation("softmax"))

model.compile(loss="categorical_crossentropy", optimizer="rmsprop")
model.summary()

# Change the training approach from before:
# train 1 epoch at a time, then test; repeat for NUM_ITERATIONS (25 rounds), stopping once the output is human-readable.
# Strictly we should probably test after every epoch, but here we take the shortcut of testing once per iteration.
# We train the model in batches and test output generated at each step
for iteration in range(NUM_ITERATIONS):
    print("=" * 50)
    print("Iteration #: {}".format(iteration))
Example #13
(X_train, y_train), (X_test, y_test) = mnist.load_data()

# data pre-processing
X_train = X_train.reshape(-1, 28, 28) / 255  # normalize
X_test = X_test.reshape(-1, 28, 28) / 255  # normalize
y_train = np_utils.to_categorical(y_train, num_classes=10)
y_test = np_utils.to_categorical(y_test, num_classes=10)

# build rnn model
model = Sequential()

# RNN cell
model.add(
    SimpleRNN(
        batch_input_shape=(None, TIME_STEPS, INPUT_SIZE),
        units=CELL_SIZE,
        unroll=True,
    ))

# output layer
model.add(Dense(OUTPUT_SIZE))
model.add(Activation('softmax'))

# optimizer
adam = Adam(LR)
model.compile(optimizer=adam,
              loss='categorical_crossentropy',
              metrics=['accuracy'])

# train
for step in range(4001):
Example #14
train_x = np.load('train_x.npy')
test_x = np.load('test_x.npy')
train_x = np.expand_dims(train_x, axis=2)
test_x = np.expand_dims(test_x, axis=2)

train_data_y = np.append(np.ones(266, dtype=int), np.zeros(266, dtype=int))
train_y_1D = train_data_y
test_data_y = np.append(np.ones(114, dtype=int), np.zeros(114, dtype=int))
test_y_1D = test_data_y
train_y = np_utils.to_categorical(train_data_y, 2)
test_y = np_utils.to_categorical(test_data_y, 2)

model = Sequential()
model.add(
    SimpleRNN(batch_input_shape=(None, 12, 1),
              activation='relu',
              units=30,
              unroll=True))
model.add(Dropout(0.2))
model.add(Dense(2))
model.add(Activation('softmax'))

optimizer = optimizers.RMSprop()
model.compile(loss='categorical_crossentropy',
              optimizer=optimizer,
              metrics=['accuracy'])
print(model.summary())

history = loss_history.LossHistory()
model.fit(train_x,
          train_y,
          verbose=2,
          callbacks=[history])  # assumed closing; the call is cut off in the original
Example #15
File: RNN.py  Project: cuquiwi/DL-MAI

# Adjust the paths below to point at your data
data_train_path = '../data/notes_train2'
with open(data_train_path, 'rb') as f:
    notes_train = pickle.load(f)
data_test_path = '../data/notes_test2'
with open(data_test_path, 'rb') as f:
    notes_test = pickle.load(f)

x_train, y_train, scaler = prepare_notes(notes_train)
x_test, y_test, _ = prepare_notes(notes_test)

n_prev = 30
model = Sequential()
model.add(SimpleRNN(128, input_shape=(n_prev, 1), return_sequences=True))
model.add(SimpleRNN(128, return_sequences=True))
model.add(SimpleRNN(128, return_sequences=True))
model.add(SimpleRNN(128, return_sequences=False))
model.add(Dense(1))
model.add(Activation('linear'))
optimizer = Adam(lr=0.001)
model.compile(loss='mse', optimizer=optimizer, metrics=['accuracy'])

hist = model.fit(np.array(x_train),
                 np.array(y_train),
                 batch_size=64,
                 epochs=200,
                 verbose=1,
                 validation_data=(np.array(x_test), np.array(y_test)))
Example #16
# Setup restored (W, state_t and inputs were cut off in the original);
# this follows the well-known NumPy SimpleRNN walkthrough.
import numpy as np
timesteps = 100
input_features = 32
output_features = 64
inputs = np.random.random((timesteps, input_features))
state_t = np.zeros((output_features,))
W = np.random.random((output_features, input_features))
U = np.random.random((output_features, output_features))
b = np.random.random((output_features,))
successive_outputs = []
for input_t in inputs:
    output_t = np.tanh(np.dot(W, input_t) + np.dot(U, state_t) + b)
    successive_outputs.append(output_t)
    state_t = output_t
# stack (not concatenate) to get shape (timesteps, output_features)
final_output_sequence = np.stack(successive_outputs, axis=0)

from keras.models import Sequential
from keras.layers import Embedding, SimpleRNN
model = Sequential()
model.add(Embedding(10000, 32))
model.add(SimpleRNN(32, return_sequences=True))
model.summary()


from keras.datasets import imdb
from keras.preprocessing import sequence
max_features = 10000
maxlen = 500
batch_size = 32
print('Loading data...')
(input_train, y_train), (input_test, y_test) = imdb.load_data(
        num_words=max_features)
print(len(input_train), 'train sequences')
print(len(input_test), 'test sequences')
print('Pad sequences (samples x time)')
input_train = sequence.pad_sequences(input_train, maxlen=maxlen)
input_test = sequence.pad_sequences(input_test, maxlen=maxlen)  # restored; the original is cut off after the train split
print('input_train shape:', input_train.shape)
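# A plausible continuation (assumed; it follows the usual IMDB SimpleRNN
# recipe; Dense would need to be imported as well):
from keras.layers import Dense
model = Sequential()
model.add(Embedding(max_features, 32))
model.add(SimpleRNN(32))
model.add(Dense(1, activation='sigmoid'))
model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['acc'])
history = model.fit(input_train, y_train,
                    epochs=10, batch_size=128, validation_split=0.2)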
Example #17
car_a_ind = {car: ind for ind, car in enumerate(sorted(alfabeto))}
# JC: Build a dictionary using the indices as keys and the characters as values
ind_a_car = {ind: car for ind, car in enumerate(sorted(alfabeto))}
# JC: Print both dictionaries
print(car_a_ind)
print(ind_a_car)

# 2. MODEL
# ===========================================================
n_a = 25  # Number of units in the hidden layer
# JC: Input of the neural network
entrada = Input(shape=(None, tam_alfabeto))
# JC: Initial (hidden) state of the network
a0 = Input(shape=(n_a, ))
# JC: Recurrent cell for training: 25 units; it also returns the updated state
celda_recurrente = SimpleRNN(n_a, activation='tanh', return_state=True)
# JC: Output layer with the alphabet size and its activation function, fed by the recurrent cell
capa_salida = Dense(tam_alfabeto, activation='softmax')
# JC: The expected output is the activation generated by the recurrent cell
salida = []
# JC: Apply the recurrent cell to the input (list) and the initial state for that input (values)
hs, _ = celda_recurrente(entrada, initial_state=a0)
# JC: Build the output by feeding the recurrent cell's result into the output layer
salida.append(capa_salida(hs))
# JC: Create the model from the data input, its hidden state, and the expected output
modelo = Model([entrada, a0], salida)
modelo.summary()
# JC: Create the gradient-descent optimizer for training the model
opt = SGD(lr=0.0005)
# JC: Compile the model here
modelo.compile(optimizer=opt, loss='categorical_crossentropy')
Example #18
              metrics=['accuracy'])
history = model.fit(X,
                    y,
                    epochs=10,
                    batch_size=128,
                    validation_data=(Xval, yval))

rw.save(history.history, 'minimalLSTM')

# 2.  Compare your models to the RNN and LSTM already provided in Keras

# 2a. RNN (default configuration)

print('-------------------- Keras native RNN ---------------------')
model = Sequential()
model.add(SimpleRNN(256, input_shape=(X.shape[1], X.shape[2])))
model.add(Dropout(0.2))
model.add(Dense(y.shape[1], activation='softmax'))
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
history = model.fit(X,
                    y,
                    epochs=10,
                    batch_size=128,
                    validation_data=(Xval, yval))

rw.save(history.history, 'nativeRNN')

# 2b. LSTM
Example #19
        if embedding_vector is not None:
            # Words not found in the embedding index will all be zeros.
            embedding_matrix[i] = embedding_vector


# In[123]:



from keras.models import Sequential
from keras.layers import Embedding, Flatten, Dense
from keras.layers import SimpleRNN

model = Sequential()
model.add(Embedding(max_words, embedding_dim, input_length=maxlen))
model.add(SimpleRNN(32))  # input shape comes from the Embedding layer above
model.add(Dense(1, activation='sigmoid'))
model.summary()


# In[124]:



model.layers[0].set_weights([embedding_matrix])
model.layers[0].trainable = False


# In[125]:

Example #20
pad = 'pre'

X = pad_sequences(sequences, max_review_length, padding=pad, truncating=pad)
from sklearn.model_selection import train_test_split

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
input_shape = X_train.shape
K.clear_session()

lstm_model = Sequential()
# We specify the maximum input length to our Embedding layer
# so we can later flatten the embedded inputs

lstm_model.add(Embedding(num_words, 4, input_length=max_review_length))

lstm_model.add(SimpleRNN(32))
lstm_model.add(Dense(1))
lstm_model.add(Activation('sigmoid'))
lstm_model.summary()

lstm_model.compile(optimizer="adam",
                   loss='binary_crossentropy',
                   metrics=['accuracy'])

callbacks_list = [
    ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=3),
    EarlyStopping(monitor='val_loss', patience=4),
    ModelCheckpoint(filepath='imdb_rnn_model.h5',
                    monitor='val_loss',
                    save_best_only=True),
    TensorBoard("./imdb_rnn_logs")
Example #21
    return (x_train, x_test, y_train)


x_train, x_test, y_train = load_dataset()

import tensorflow.python.keras
from keras.models import Sequential
from keras.layers import Dense, Activation, SimpleRNN
import pandas as pd

hid_dim = 10

model = Sequential()

# input_shape=(sequence length T, dimension of x_t), output_shape=(sequence length T, units(=hid_dim))
model.add(SimpleRNN(hid_dim, input_shape=x_train.shape[1:]))
model.add(Dense(y_train.shape[1], activation='softmax'))

model.compile(loss='categorical_crossentropy', optimizer='adam')
model.fit(x_train,
          y_train,
          epochs=50,
          batch_size=100,
          verbose=2,
          validation_split=0.2)

y_pred = np.argmax(model.predict(x_test), 1)

submission = pd.Series(y_pred, name='label')
submission.to_csv('/root/userspace/submission.csv',
                  header=True)  # call closed here; any further arguments were cut off
Example #22
    def test_rnn_layer(self):
        i = 0
        numerical_err_models = []
        shape_err_models = []
        numerical_failiure = 0
        for base_params in self.base_layer_params:
            base_params = dict(zip(self.params_dict.keys(), base_params))
            for rnn_params in self.rnn_layer_params:
                rnn_params = dict(
                    zip(self.simple_rnn_params_dict.keys(), rnn_params))
                model = Sequential()
                model.add(
                    SimpleRNN(
                        base_params['output_dim'],
                        input_length=base_params['input_dims'][1],
                        input_dim=base_params['input_dims'][2],
                        activation=base_params['activation'],
                        return_sequences=base_params['return_sequences'],
                        go_backwards=base_params['go_backwards'],
                        unroll=base_params['unroll'],
                    ))
                mlkitmodel = _get_mlkit_model_from_path(model)
                input_data = generate_input(base_params['input_dims'][0],
                                            base_params['input_dims'][1],
                                            base_params['input_dims'][2])
                keras_preds = model.predict(input_data).flatten()
                if K.tensorflow_backend._SESSION:
                    import tensorflow as tf
                    tf.reset_default_graph()
                    K.tensorflow_backend._SESSION.close()
                    K.tensorflow_backend._SESSION = None
                input_data = np.transpose(input_data, [1, 0, 2])
                coreml_preds = mlkitmodel.predict({'data': input_data
                                                   })['output'].flatten()
                try:
                    self.assertEquals(coreml_preds.shape, keras_preds.shape)
                except AssertionError:
                    print(
                        "Shape error:\nbase_params: {}\nkeras_preds.shape: {}\ncoreml_preds.shape: {}"
                        .format(base_params, keras_preds.shape,
                                coreml_preds.shape))
                    shape_err_models.append(base_params)
                    i += 1
                    continue
                try:
                    max_denominator = np.maximum(
                        np.maximum(np.abs(coreml_preds), np.abs(keras_preds)),
                        1.0)
                    relative_error = coreml_preds / max_denominator - keras_preds / max_denominator
                    for j in range(len(relative_error)):  # avoid shadowing the outer counter i
                        self.assertLessEqual(relative_error[j], 0.01)
                except AssertionError:
                    print(
                        "Assertion error:\nbase_params: {}\nkeras_preds: {}\ncoreml_preds: {}"
                        .format(base_params, keras_preds, coreml_preds))
                    numerical_failiure += 1
                    numerical_err_models.append(base_params)
                i += 1

        self.assertEquals(shape_err_models, [],
                          msg='Shape error models {}'.format(shape_err_models))
        self.assertEquals(numerical_err_models, [],
                          msg='Numerical error models {}\n'
                          'Total numerical failiures: {}/{}\n'.format(
                              numerical_err_models, numerical_failiure, i))
Example #23
x_train = np.array(samples[:len(samples) // 2]).astype("float32")
x_test = np.array(samples[len(samples) // 2:]).astype("float32")
y_train = np.array(labels[:len(labels) // 2]).astype("float32")
y_test = np.array(labels[len(labels) // 2:]).astype("float32")

print(x_train.shape)

#epochs
epochs = None
if len(sys.argv) > 1:
    if sys.argv[1].isdigit():
        epochs = int(sys.argv[1])

if epochs is None:
    epochs = 1000

model = Sequential()
model.add(
    SimpleRNN(len(gestures),
              activation="relu",
              input_shape=(sequence_length, channels)))
model.compile(optimizer='adadelta',
              loss='mean_squared_error',
              metrics=['accuracy'])
model.fit(x_train,
          y_train,
          epochs=epochs,
          batch_size=len(x_train),
          validation_data=(x_test, y_test))
model.save("models/rnn.h5")
Example #24
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print('----- x_train shape:', x_train.shape)
print('----- x_test  shape:', x_test.shape)

# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)

print('========== 2.Building model...')
model = Sequential()
model.add(
    SimpleRNN(hidden_units,
              kernel_initializer=initializers.RandomNormal(stddev=0.001),
              recurrent_initializer=initializers.Identity(gain=1.0),
              activation='relu',
              input_shape=x_train.shape[1:]))
model.add(Dense(num_classes))
model.add(Activation('softmax'))
rmsprop = RMSprop(lr=learning_rate)
model.compile(loss='categorical_crossentropy',
              optimizer=rmsprop,
              metrics=['accuracy'])

model.fit(x_train,
          y_train,
          batch_size=batch_size,
          epochs=epochs,
          verbose=1,
          validation_data=(x_test, y_test))
Example #25
x_train_sq_pd = pad_sequences(x_train_sq, maxlen=threshold)
x_val_sq_pd = pad_sequences(x_val_sq, maxlen=threshold)
x_test_sq_pd = pad_sequences(x_test_sq, maxlen=threshold)

# # shallow vanila RNN

# In[22]:

model = Sequential()
model.add(
    Embedding(size_of_vocabulary_dataset,
              300,
              weights=[embedding_matrix],
              input_length=threshold,
              trainable=False))
model.add(SimpleRNN(64, activation='sigmoid'))
model.add(Dense(1, activation='sigmoid'))

model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['acc'])

es = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=3)
mc = ModelCheckpoint('best_model_shallow_VRNN.h',
                     monitor='val_acc',
                     mode='max',
                     save_best_only=True,
                     verbose=1)

#Print summary of model
print(model.summary())

# In[ ]:
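# Hedged sketch of the fit call the es/mc callbacks were built for
# (assumed; y_train and y_val from the original preprocessing are not shown):
history = model.fit(x_train_sq_pd, y_train,
                    validation_data=(x_val_sq_pd, y_val),
                    epochs=10, batch_size=128,
                    callbacks=[es, mc])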
Example #26
# https://keras.io/getting-started/faq/#how-can-i-obtain-reproducible-results-using-keras-during-development
os.environ['PYTHONHASHSEED'] = '0'
np.random.seed(42)
rn.seed(12345)
session_conf = tf.ConfigProto(intra_op_parallelism_threads=1,
                              inter_op_parallelism_threads=1)
tf.set_random_seed(1234)
sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
K.set_session(sess)

# define model
model = Sequential()
model.add(
    SimpleRNN(units=n_neurons,
              activation='tanh',
              input_shape=(None, train_X.shape[2]),
              use_bias=True,
              bias_regularizer=L1L2(l1=0.01, l2=0.01),
              return_sequences=False))
model.add(Dropout(.145))
model.add(Dense(activation='linear', units=n_ahead - 1, use_bias=True))
adam = keras.optimizers.Adam(lr=0.001,
                             beta_1=0.9,
                             beta_2=0.999,
                             epsilon=None,
                             decay=0.0,
                             amsgrad=False)
model.compile(loss=rmse, optimizer=adam)
earlystop = keras.callbacks.EarlyStopping(monitor='loss',
                                          min_delta=0.00000001,
                                          patience=5,
                                          verbose=1)  # call closed here; any further arguments were cut off
Example #27
X_train = []
y_train = []
timesteps = 50
for i in range(timesteps, 1258):
    X_train.append(train_scaled[i - timesteps:i, 0])
    y_train.append(train_scaled[i, 0])
X_train, y_train = np.array(X_train), np.array(y_train)

X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))

y_train = y_train.reshape(-1, 1)

regressor = Sequential()

regressor.add(SimpleRNN(45, return_sequences=True, input_shape=(X_train.shape[1], 1)))
regressor.add(Dropout(0.15))

regressor.add(SimpleRNN(45, return_sequences=True))
regressor.add(Dropout(0.15))

regressor.add(SimpleRNN(45, return_sequences=True))
regressor.add(Dropout(0.15))

regressor.add(SimpleRNN(units=45))
regressor.add(Dropout(0.15))

regressor.add(Dense(units=1))

regressor.compile(optimizer='adam', loss='mean_squared_error')
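# Plausible next step (assumed; the original stops after compiling):
regressor.fit(X_train, y_train, epochs=100, batch_size=32)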
Example #28
##
# ERROR IT THROWS:
# UserWarning: RNN dropout is no longer supported with the Theano
# backend due to technical limitations. You can either set
# `dropout` and `recurrent_dropout` to 0, or use the TensorFlow
# backend. RNN dropout is no longer supported with the Theano backend
##

model = Sequential()
model.add(
    SimpleRNN(
        input_dim=dim_inp,
        units=units_rnn,
        stateful=False,
        #batch_input_shape = (200000,10,2),
        dropout=0,
        recurrent_dropout=0,
        return_sequences=True,
        activation="linear"))

# Which activation should we use??
model.add(Dense(units=dim_out, activation="linear"))

##
# Input format for the network. CAREFUL: recurrent neural networks work
# with "timesteps", so the data is split into chunks. See the following link:
# https://stackoverflow.com/questions/38294046/simple-recurrent-neural-network-input-shape
#
##
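# A tiny illustration of the timestep formatting the comment above warns
# about (shapes assumed purely for demonstration):
import numpy as np
raw = np.random.random((200000 * 10, 2))  # flat data: (samples*timesteps, features)
X = raw.reshape(200000, 10, 2)            # RNN input: (samples, timesteps, features)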
Example #29
# integer encode the documents
tencoded_docs = tt.texts_to_sequences(input_test)
#print(encoded_docs)
# pad documents to a max length of 4 words
tpadded_docs = pad_sequences(tencoded_docs, maxlen=max_length, padding='post')
#print(padded_docs)

# define model
model = Sequential()
e = Embedding(vocab_size,
              100,
              weights=[embedding_matrix],
              input_length=max_length,
              trainable=False)
model.add(e)
model.add(SimpleRNN(lstm_output_size, dropout=0.2, recurrent_dropout=0.2))
model.add(Dense(nclass, activation='softmax'))

# compile the model
model.compile(optimizer='rmsprop',
              loss='categorical_crossentropy',  # softmax over nclass outputs calls for categorical, not binary
              metrics=['accuracy'])
# summarize the model
print(model.summary())
# fit the model
model.fit(padded_docs,
          y_train,
          epochs=1,
          verbose=0,
          validation_data=(vpadded_docs, y_valid))
Example #30
#------------------------------------------------------------------------------
#Recurrent neural network RNN
#------------------------------------------------------------------------------

#parameters for RNN
#input dimensions
in_dim = trainx.shape[1:3]
#output dimensions
out_dim = trainy.shape[1]

#Recurrent neural network
#Sequential model
model = Sequential()
#fully-connected RNN, the output from previous timestep is to be fed to next timestep
model.add(SimpleRNN(units=100, input_shape=in_dim, activation="relu"))
#units: Positive integer, dimensionality of the output space
model.add(Dense(units=16, activation="relu"))
#adding feed forward layer, 16 neurons in a hidden layer
model.add(Dense(out_dim, activation='linear'))
#output layer, out_dim outputs
model.compile(loss='mse', optimizer='adam')
#Configures the model for training
#https://keras.io/optimizers/
#optimizer='adam'
model.summary()
#https://www.tensorflow.org/api_docs/python/tf/keras/layers/SimpleRNN

#training
model.fit(trainx, trainy, epochs=50, batch_size=16, verbose=1)
#prediction errors
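# Plausible continuation for the "prediction errors" step (assumed;
# the NumPy import is not shown in the original):
trainy_pred = model.predict(trainx)
print('train MSE:', np.mean((trainy_pred - trainy) ** 2))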