Example no. 1
import sys

from keras.models import Sequential
from keras.layers import Dense, LSTM

# Fit a stateful LSTM one epoch at a time, resetting the layer state manually.
def fit_lstm(train, batch_size, nb_epoch, neurons):
    X, y = train[:, 0:-1], train[:, -1]
    X = X.reshape(X.shape[0], 1, X.shape[1])
    model = Sequential()
    model.add(
        LSTM(neurons,
             batch_input_shape=(batch_size, X.shape[1], X.shape[2]),
             stateful=True))
    model.add(Dense(1))
    model.compile(loss='mean_squared_error', optimizer='adam')
    print("X = {}".format(X.shape))
    print("y = {}".format(y.shape))
    print("LSTM batch size : {}".format((batch_size, X.shape[1], X.shape[2])))
    for i in range(nb_epoch):
        sys.stdout.write("\rRunning epoch: {:8d}/{}".format(i + 1, nb_epoch))
        sys.stdout.flush()
        model.fit(X,
                  y,
                  epochs=1,
                  batch_size=batch_size,
                  verbose=0,
                  shuffle=False)
        model.reset_states()
    print("")
    return model
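
A quick usage sketch for fit_lstm above; the synthetic data and the lag/target layout are assumptions for illustration, not part of the original example:

# Hypothetical call: rows are samples, last column is the target (assumption).
import numpy as np

train = np.random.rand(120, 5)            # 4 lag features + 1 target column
model = fit_lstm(train, batch_size=4, nb_epoch=50, neurons=8)

# A stateful network keeps its fixed batch size at prediction time as well.
X = train[:, 0:-1].reshape(train.shape[0], 1, train.shape[1] - 1)
yhat = model.predict(X, batch_size=4)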
Example no. 2

import math

import numpy as np
from hyperopt import STATUS_OK
from hyperas.distributions import choice
from keras.models import Sequential
from keras.layers import Dense, LSTM
from keras.callbacks import EarlyStopping, ReduceLROnPlateau

# Hyperas (hyperopt + Keras) template: the {{choice([...])}} markers are
# substituted with sampled values during the hyperparameter search.
def create_model(x_train, y_train, x_val, y_val, n_step, n_features):
    callbacks_list = [
        EarlyStopping(
            monitor='val_loss',
            patience=5,
        ),
        ReduceLROnPlateau(
            monitor='val_loss',
            factor=0.3,
            patience=2,
        )
    ]

    # A stateful LSTM needs a fixed batch size that evenly divides both the
    # training and validation sample counts, so use their greatest common
    # divisor.
    gcd = math.gcd(x_train.shape[0], x_val.shape[0])

    model = Sequential()
    model.add(
        LSTM({{choice([64, 128, 256])}},
             return_sequences=True,
             batch_input_shape=(gcd, n_step, n_features),
             stateful=True))
    #if {{choice(['add', 'no'])}} == 'add':
    #    model.add(LSTM({{choice([64, 128, 256])}}, stateful=True, return_sequences=True))
    model.add(Dense(n_features))
    model.compile(optimizer='adam', loss='mae', metrics=['acc'])

    # Note: each fit() call re-initializes the callbacks in on_train_begin, so
    # with epochs=1 their patience counters never accumulate across this loop.
    for epoch_idx in range(200):
        hist = model.fit(x_train,
                         y_train,
                         epochs=1,
                         validation_data=(x_val, y_val),
                         callbacks=callbacks_list,
                         shuffle=False,
                         verbose=0,
                         batch_size=gcd)
        model.reset_states()

    return {
        'loss': np.amin(hist.history['val_loss']),
        'status': STATUS_OK,
        'model': model
    }
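
For context, a minimal sketch of how hyperas would typically drive create_model; the data() helper, its shapes, and the search settings are assumptions for illustration, not part of the original example:

# Hypothetical hyperas driver (assumed setup, illustrative shapes).
import numpy as np
from hyperopt import Trials, tpe
from hyperas import optim

def data():
    # hyperas passes these return values straight into create_model.
    n_step, n_features = 4, 3
    x_train = np.random.rand(96, n_step, n_features)
    y_train = np.random.rand(96, n_features)
    x_val = np.random.rand(32, n_step, n_features)
    y_val = np.random.rand(32, n_features)
    return x_train, y_train, x_val, y_val, n_step, n_features

best_run, best_model = optim.minimize(model=create_model,
                                      data=data,
                                      algo=tpe.suggest,
                                      max_evals=10,
                                      trials=Trials())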
Example no. 3

from keras.models import Sequential
from keras.layers import Dense, LSTM

def fit_lstm(train, batch_size, nb_epoch, neurons):
    X, y = train[:, 0:-1], train[:, -1]
    X = X.reshape(X.shape[0], 1, X.shape[1])
    model = Sequential()
    model.add(
        LSTM(neurons,
             batch_input_shape=(batch_size, X.shape[1], X.shape[2]),
             stateful=True))
    model.add(Dense(1))
    model.compile(loss='mean_squared_error', optimizer='adam')
    for i in range(nb_epoch):
        model.fit(X,
                  y,
                  epochs=1,
                  batch_size=batch_size,
                  verbose=0,
                  shuffle=False)
        model.reset_states()
    return model
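
One caveat worth noting: a stateful model built this way is locked to its training batch size and cannot predict single samples directly. A common workaround, sketched here under assumed names (neurons, n_features, model carry over from the training run; none of this is from the original source), is to rebuild the network with batch size 1 and copy the weights across:

# Hypothetical batch-size-1 forecasting model (assumed names).
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, LSTM

forecast_model = Sequential()
forecast_model.add(
    LSTM(neurons, batch_input_shape=(1, 1, n_features), stateful=True))
forecast_model.add(Dense(1))
forecast_model.set_weights(model.get_weights())  # reuse the trained weights
forecast_model.compile(loss='mean_squared_error', optimizer='adam')

x = np.random.rand(1, 1, n_features)             # one new observation
yhat = forecast_model.predict(x, batch_size=1)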
Example no. 4
import math

from keras.models import Sequential
from keras.layers import Dense, CuDNNLSTM
from sklearn.metrics import mean_squared_error

# CuDNNLSTM is the GPU-only fused implementation; trainX, trainY, testX,
# testY and the fitted scaler come from preprocessing (see sketch below).
model = Sequential()
model.add(
    CuDNNLSTM(num_hidden,
              batch_input_shape=(batch_size, look_back, 1),
              stateful=True))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')

for i in range(num_epochs):
    print("Iter: %d of %d" % (i, num_epochs))
    model.fit(trainX,
              trainY,
              epochs=1,
              batch_size=batch_size,
              verbose=2,
              shuffle=False)
    model.reset_states()

trainPredict = model.predict(trainX)
testPredict = model.predict(testX)

trainPredict = scaler.inverse_transform(trainPredict)
trainY = scaler.inverse_transform([trainY])
testPredict = scaler.inverse_transform(testPredict)
testY = scaler.inverse_transform([testY])

trainScore = math.sqrt(mean_squared_error(trainY[0], trainPredict[:, 0]))
print('Train Score: %.2f RMSE' % (trainScore))

testScore = math.sqrt(mean_squared_error(testY[0], testPredict[:, 0]))
print('Test Score: %.2f RMSE' % (testScore))
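
The snippet above assumes trainX, trainY, testX, testY and a fitted scaler already exist. A hedged sketch of the kind of windowing that typically produces them; the series, split point and look_back value here are illustrative assumptions:

# Hypothetical preprocessing for the snippet above (assumed, not from source).
import numpy as np
from sklearn.preprocessing import MinMaxScaler

def create_dataset(series, look_back=1):
    # Slide a window of length look_back over the series: window -> next value.
    X, y = [], []
    for i in range(len(series) - look_back):
        X.append(series[i:i + look_back, 0])
        y.append(series[i + look_back, 0])
    return np.array(X), np.array(y)

raw_values = np.sin(np.arange(500) / 8.0)   # stand-in 1-D series (assumption)
split, look_back = 400, 3

scaler = MinMaxScaler(feature_range=(0, 1))
scaled = scaler.fit_transform(raw_values.reshape(-1, 1))
trainX, trainY = create_dataset(scaled[:split], look_back)
trainX = trainX.reshape(trainX.shape[0], look_back, 1)    # (samples, steps, 1)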
Example no. 5

import logging

import numpy as np
from keras.models import Sequential
from keras.layers import Dense, LSTM

logger = logging.getLogger(__name__)

class LSTMTrainer(object):
    def __init__(self):
        self._neurons = 7
        self._batch = 100
        self._epoch = 700
        self._features = 1
        self._seq = 0
        self._model = None
        self._dataset = []

    def build_network(self, seq):
        logger.info('build lstm started')
        self._seq = seq
        self._model = Sequential()
        self._model.add(
            LSTM(self._neurons,
                 batch_input_shape=(self._batch, seq, self._features),
                 stateful=True))
        self._model.add(Dense(units=1))
        self._model.compile(loss='mean_squared_error', optimizer='adam')
        logger.info('build lstm finished')

    def reset(self):
        self._dataset = []

    def update(self, value):
        self._dataset.append(value)

    def train_und_export(self, path):
        tra_loss = []
        X, y = self.data_preparation()
        logger.info('start training')
        for i in range(self._epoch):
            history = self._model.fit(X,
                                      y,
                                      epochs=1,
                                      batch_size=self._batch,
                                      verbose=1,
                                      shuffle=False)
            tra_loss.append(history.history['loss'])
            logger.info("loss in epoch " + str(i) + " is: " +
                        str(tra_loss[i]))
            self._model.reset_states()

        logger.info('end of training')
        logger.info('start exporting')
        self._model.save(path)
        logger.info('end of exporting')
        return True

    def data_preparation(self):
        logger.info('from lstm perspective ' + str(len(self._dataset)) +
                    ' points are stored in dataset')
        data = np.reshape(self._dataset,
                          (len(self._dataset), self._features))
        n_samples = data.shape[0]
        X = list()
        y = list()
        # create input sequences for the lstm w.r.t. the given sequence length
        for i in range(0, n_samples - self._seq, 1):
            sample = data[i:i + self._seq]
            X.append(sample)
            y.append(data[i + self._seq])

        # convert to arrays: X becomes 3-D (samples, seq, features), the
        # shape the LSTM input layer expects; y becomes 2-D (samples, features)
        X = np.array(X)
        y = np.array(y)

        return X, y
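
A brief usage sketch for LSTMTrainer; the sequence length, input values and output path are illustrative assumptions:

# Hypothetical usage; 1010 points give 1000 windows, evenly divisible by the
# fixed batch size of 100 that the stateful network requires.
import math

trainer = LSTMTrainer()
trainer.build_network(seq=10)
for t in range(1010):
    trainer.update(math.sin(t / 10.0))   # feed one observation at a time
trainer.train_und_export('lstm_model.h5')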