Code example #1
# Imports needed by this snippet (the example starts mid-script in the original,
# so everything up to Conv_layer1 is reconstructed here as an assumption).
from keras.layers import Input, Conv1D, MaxPooling1D, UpSampling1D, Dropout
from keras.models import Model
from keras import optimizers

# Assumed input: 120 time steps x 70 features, so the 70-filter output layer below
# reconstructs the input; the 256 filters match the (120, 256) Dropout hint below.
input_X = Input(shape=(120, 70))
Conv_layer1 = Conv1D(256, (25, ),
                     activation='relu',
                     padding='same',
                     use_bias=True)(input_X)
encoded = MaxPooling1D(2, padding='same')(Conv_layer1)
# Note: the output of this Dropout is never used (the name is rebound below),
# so this line has no effect on the model graph.
Dropout_Conv = Dropout(rate=0.25, input_shape=(120, 256))(Conv_layer1)
decoded = UpSampling1D(size=2)(encoded)
Dropout_Conv = Dropout(rate=0.25, input_shape=(120, 256))(decoded)
Conv_layer2 = Conv1D(70, (25, ),
                     activation='relu',
                     padding='same',
                     use_bias=True)(Dropout_Conv)

autoencoder = Model(input_X, Conv_layer2)
autoencoder.summary()
myadam = optimizers.Adam(lr=0.0001,
                         beta_1=0.9,
                         beta_2=0.999,
                         epsilon=1e-08,
                         decay=0.0)
autoencoder.compile(optimizer=myadam, loss='mse')

# For an autoencoder the reconstruction target is the input itself,
# so training pairs train_X with itself and validation pairs test_X with itself.
autoencoder.fit(train_X,
                train_X,
                epochs=80,
                batch_size=1,
                validation_data=(test_X, test_X))

# save autoencoder for future use
model_json = autoencoder.to_json()
with open(".\\model\\autoencoder_ND.json", "w") as json_file:
    json_file.write(model_json)
# serialize weights to HDF5 (weight file name assumed to mirror the JSON file above)
autoencoder.save_weights(".\\model\\autoencoder_ND.h5")
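The saved architecture and weights can later be restored with Keras' model_from_json and load_weights; a minimal sketch, assuming the file names used above:

from keras.models import model_from_json

# Rebuild the architecture from the saved JSON, then load the trained weights.
with open(".\\model\\autoencoder_ND.json", "r") as json_file:
    restored = model_from_json(json_file.read())
restored.load_weights(".\\model\\autoencoder_ND.h5")
restored.compile(optimizer='adam', loss='mse')  # recompile before evaluating or predicting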
Code example #2
# Imports needed by this fragment (the example starts mid-script in the original).
from keras.models import Sequential
from keras.layers import LSTM, Dense
from keras.callbacks import ModelCheckpoint
from keras import optimizers

print('TEST_X_3D = ', test_X_3d.shape)

# ------------------[TRAINING AND VALIDATION]----------------------
model = Sequential()
model.add(
    LSTM(8,
         input_shape=(train_X_3d.shape[1], train_X_3d.shape[2]),
         return_sequences=True,
         stateful=False,
         dropout=0))
model.add(LSTM(2, return_sequences=False, stateful=False))
# model.add(LSTM(32,
#                stateful=False))
# model.add(Dense(20))
model.add(Dense(time_step_out))  # make this auto !
adam = optimizers.Adam(lr=0.001)
model.compile(loss='mean_absolute_error', optimizer=adam)
print(model.summary())

# checkpoint - saves the model during training every time the monitored metric (val_loss) reaches a new best (lowest) value
filepath = 'air_quality_model.h5'
checkpoint = ModelCheckpoint(
    filepath=filepath,
    monitor='val_loss',
    verbose=1,
    save_best_only=True,
    mode='min',  # for accuracy use 'max'; for loss use 'min'
    period=1)  # number of epochs between checkpoints
callback_list = [checkpoint]

# The original example is cut off here; the target arrays and the fit
# hyper-parameters below are assumptions added for completeness.
history = model.fit(train_X_3d,
                    train_Y,
                    epochs=100,
                    batch_size=72,
                    validation_data=(test_X_3d, test_Y),
                    callbacks=callback_list,
                    verbose=1)
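Once training finishes, the best checkpoint written by ModelCheckpoint can be reloaded for inference; a minimal sketch, assuming the 'air_quality_model.h5' path used above:

from keras.models import load_model

# Restore the weights with the lowest validation loss seen during training.
best_model = load_model('air_quality_model.h5')
predictions = best_model.predict(test_X_3d)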
Code example #3
# Imports needed by this fragment (the model itself is built earlier in the original script).
import keras
from keras import optimizers

print(model.summary())


class LossHistory(keras.callbacks.Callback):
    """Records the training loss per batch and the validation accuracy per epoch."""

    def on_train_begin(self, logs={}):
        self.loss = []
        self.val_acc = []

    def on_batch_end(self, batch, logs={}):
        self.loss.append(logs.get('loss'))

    def on_epoch_end(self, epoch, logs={}):
        # Older Keras versions report validation accuracy as 'val_acc';
        # newer releases use 'val_accuracy'.
        self.val_acc.append(logs.get('val_acc'))


history_cb = LossHistory()

opt = optimizers.Adam(lr=0.01)
model.compile(optimizer=opt, loss='binary_crossentropy', metrics=['accuracy'])

batch_size = 250
model.fit(Xtr1,
          ytr1,
          callbacks=[history_cb],
          verbose=1,
          epochs=10000,
          batch_size=batch_size,
          validation_data=(Xts1, yts1))

# works fairly well, still a bit overfit... trying conv layers
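The per-batch loss and per-epoch validation accuracy collected by the LossHistory callback can be inspected after training; a minimal plotting sketch with matplotlib (not part of the original example):

import matplotlib.pyplot as plt

# Per-batch training loss collected in history_cb.loss.
plt.figure()
plt.plot(history_cb.loss)
plt.xlabel('batch')
plt.ylabel('training loss')

# Per-epoch validation accuracy collected in history_cb.val_acc.
plt.figure()
plt.plot(history_cb.val_acc)
plt.xlabel('epoch')
plt.ylabel('validation accuracy')
plt.show()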