Example #1
def build_train_model():

    global endo_lag
    global exog_lag
    global lstm_units
    global lstm_batch_size

    X_train, _, Y_train, _, _ = data_feeder.get_data(True)

    ae_1_obj, ae_2_obj = get_ae_2()
    xformed_exog_train = ae_1_obj.model.predict(X_train)

    _x1 = get_windowed_data(data=xformed_exog_train, window_size=exog_lag)
    # window the endogenous series: the first endo_lag columns are the lagged
    # inputs, the final column is the one-step-ahead target
    _x2 = get_windowed_data(data=Y_train, window_size=endo_lag + 1)
    _y = _x2[:, -1:]
    _x2 = _x2[:, 0:endo_lag]

    num_samples = min(_x1.shape[0], _x2.shape[0])
    _x1 = _x1[-num_samples:]
    _x2 = _x2[-num_samples:]
    _y = _y[-num_samples:, :]

    ae2_inp = np.concatenate([_x1, _x2], axis=-1)
    x = ae_2_obj.model.predict(ae2_inp)

    name = str(exog_lag) + '_' + str(endo_lag)
    lstm_model_obj = lstm_model(name)
    lstm_model_obj.set_hyperparams(lstm_units=lstm_units,
                                   time_step=lstm_time_steps,
                                   epochs=lstm_epochs,
                                   batch_size=lstm_batch_size)

    lstm_model_obj.set_train_data(x, _y)
    train_mse = lstm_model_obj.load_model()
    return ae_1_obj, ae_2_obj, lstm_model_obj, train_mse
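Both examples above call get_windowed_data, which is not part of this listing. A minimal sketch of what it might look like, assuming it turns a 2-D [time, features] array into one flattened row per sliding window, is shown below; the original project's implementation may differ.

# Hedged sketch of the assumed get_windowed_data helper (not in the original listing).
import numpy as np

def get_windowed_data(data, window_size):
    # data: [num_timesteps, num_features] -> [num_windows, window_size * num_features]
    data = np.asarray(data)
    windows = [data[i:i + window_size].reshape(-1)
               for i in range(data.shape[0] - window_size + 1)]
    return np.stack(windows, axis=0)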
Example #2
def get_ae_2():
    global exog_lag
    global endo_lag
    ae_obj_1 = get_ae_1()

    X_train, _, Y_train, _, _ = data_feeder.get_data(True)

    xformed_exog_train = ae_obj_1.model.predict(X_train)
    _x1 = get_windowed_data(data=xformed_exog_train, window_size=exog_lag)
    _y1 = get_windowed_data(data=Y_train, window_size=endo_lag)

    num_samples = min(_x1.shape[0], _y1.shape[0])
    _x1 = _x1[-num_samples:]
    _y1 = _y1[-num_samples:]
    ae2_train_data = np.concatenate([_x1, _y1], axis=-1)
    ae_2_inp_dim = ae2_train_data.shape[-1]

    name = str(2) + '_' + str(exog_lag) + '_' + str(endo_lag)

    ae_obj_2 = ae_class(name)
    ae_obj_2.set_hyperparams(layer_units=ae_2_units,
                             inp_dim=ae_2_inp_dim,
                             batch_size=128,
                             epochs=ae_2_epoch)
    ae_obj_2.set_data(ae2_train_data)
    ae_2_losses = ae_obj_2.load_model()

    return ae_obj_1, ae_obj_2
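get_ae_1 and get_ae_2 both rely on an ae_class wrapper that is not shown here. The sketch below only mirrors the calls made above (set_hyperparams, set_data, load_model, and a .model attribute used for predict); the class name and method names come from the listing, but the body is an assumption, not the original implementation.

# Hedged sketch of the ae_class interface implied by the calls above.
import keras
from keras.layers import Dense

class ae_class:

    def __init__(self, name):
        self.name = str(name)
        self.model = None                      # encoder exposed via .model

    def set_hyperparams(self, layer_units, inp_dim, batch_size, epochs):
        self.layer_units = layer_units
        self.inp_dim = inp_dim
        self.batch_size = batch_size
        self.epochs = epochs

    def set_data(self, train_data):
        self.train_data = train_data

    def load_model(self):
        # The real code presumably reloads a saved model when one exists;
        # this sketch simply trains a dense autoencoder and keeps the encoder half.
        inp = keras.layers.Input(shape=(self.inp_dim,))
        h = inp
        for units in self.layer_units:
            h = Dense(units, activation='tanh')(h)
        out = Dense(self.inp_dim, activation='tanh')(h)
        autoencoder = keras.models.Model(inp, out)
        autoencoder.compile(optimizer='adam', loss='mse')
        history = autoencoder.fit(self.train_data, self.train_data,
                                  epochs=self.epochs,
                                  batch_size=self.batch_size,
                                  shuffle=False,
                                  verbose=0)
        self.model = keras.models.Model(inp, h)  # encoder only
        return history.history['loss']
Example #3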
def get_stacked_ae():

    global ae_epochs
    global batch_size
    if os.path.exists('ae_model.h5'):
        return keras.models.load_model('ae_model.h5')

    nb_epoch = ae_epochs
    X_train, _, _, _, _ = data_feeder.get_data(True)

    num_layers = 3
    shape = [64, 32, 16]
    inp_dim = X_train.shape[-1]

    model = keras.models.Sequential()

    # input_layer = keras.layers.Dense(units=inp_dim,
    #                                  input_dim=inp_dim,
    #                                  activation=None,
    #                                  use_bias=False,
    #                                  activity_regularizer=None)
    # model.add(input_layer)

    for i in range(num_layers):
        if i == 0:
            layer = Dense(shape[i], input_dim=inp_dim, activation='sigmoid')
        else:
            layer = Dense(shape[i], activation='tanh')

        model.add(layer)
        layer = Dense(units=inp_dim, activation='tanh')
        model.add(layer)

        model.compile(optimizer=keras.optimizers.Adam(),
                      loss=keras.losses.MSE,
                      metrics=['accuracy'])

        model.fit(X_train,
                  X_train,
                  epochs=nb_epoch,
                  batch_size=batch_size,
                  shuffle=False)

        for layer in model.layers:
            layer.trainable = False

        model.pop()
    model.save('ae_model.h5')
    return model
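A quick way to check the saved encoder is to reload it and transform the exogenous features. The sketch below assumes get_stacked_ae has already been run and that data_feeder is the same module used above.

# Usage sketch: reload the greedily pretrained encoder and encode the features.
from keras.models import load_model

encoder = load_model('ae_model.h5')
X_train, _, _, _, _ = data_feeder.get_data(True)
encoded = encoder.predict(X_train)
print(encoded.shape)   # last dim is 16, the final entry of shape = [64, 32, 16]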
Example #4
def get_ae_1():

    X_train, _, Y_train, _, _ = data_feeder.get_data(True)

    # set up autoencoder for exogenous values
    ae_obj_1 = ae_class(1)

    ae_obj_1.set_hyperparams(layer_units=ae_1_units,
                             inp_dim=X_train.shape[-1],
                             batch_size=128,
                             epochs=ae_1_epoch)

    ae_obj_1.set_data(X_train)
    ae_1_losses = ae_obj_1.load_model()
    return ae_obj_1
def get_train_data():
    global ae_obj
    global window_size

    X_train, _, Y_train, _, _ = data_feeder.get_data(True)

    # set up train data
    ae_obj, X_train_ae = pretrain_ae(X_train)
    train_y_inp, train_y_op = get_windowed_y(Y_train, window_size=window_size)
    train_exog_inp = X_train_ae[window_size:, :]
    print(' train_exog_inp ', train_exog_inp.shape)
    # concatenate train_exog_inp and train_y_op
    train_data_x = np.concatenate([train_y_inp, train_exog_inp], axis=1)
    print('Train Data')
    print('train_data_x ', train_data_x.shape)
    print('train_y_op ', train_y_op.shape)
    return train_data_x, train_y_op
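get_windowed_y is another helper that is not defined in this listing. A minimal version consistent with its use above, where each row holds window_size lagged values of Y and the target is the value that follows, might look like this (an assumption, not the original code):

# Hedged sketch of the assumed get_windowed_y helper.
import numpy as np

def get_windowed_y(y, window_size):
    y = np.asarray(y).reshape(-1, 1)
    # inputs: window_size lagged values; outputs: the value following each window
    y_inp = np.stack([y[i:i + window_size, 0]
                      for i in range(y.shape[0] - window_size)], axis=0)
    y_op = y[window_size:, :]
    return y_inp, y_op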
def create_complete_model():
    # ------------ #

    global lstm_time_step
    global batch_size
    global lstm_epochs
    global window_size

    # ------------ #

    if os.path.exists('model_1.h5'):
        return

    ae_model = load_model('ae_model.h5')
    X_train, _, Y_train, _, _ = data_feeder.get_data(True)

    # create windows
    inp, op = get_windowed_inp(Y_train, window_size)
    num_samples = inp.shape[0]
    # print 'num_samples', num_samples

    X_train = X_train[-num_samples:, :]
    # print 'X_train', X_train.shape
    # concatenate X_train and input passed through ae
    ae_op = ae_model.predict(X_train)
    # print 'ae_op', ae_op.shape

    inp = np.concatenate([inp, ae_op], axis=-1)
    # print 'inp shape', inp.shape

    # Reshape the data

    # pad_len is 0 when the length is already a multiple of lstm_time_step
    pad_len = (-inp.shape[0]) % lstm_time_step

    # add padding
    _inp_pad = np.zeros(shape=[pad_len, inp.shape[-1]])
    _op_pad = np.zeros(shape=[pad_len, op.shape[-1]])

    inp = np.concatenate([_inp_pad, inp], axis=0)
    op = np.concatenate([_op_pad, op], axis=0)
    inp_dim = inp.shape[-1]
    op_dim = op.shape[-1]

    inp = np.reshape(inp, [-1, lstm_time_step, inp_dim])
    op = np.reshape(op, [-1, lstm_time_step, op_dim])

    # Ensure the number of samples is divisible by the batch size

    num_samples = (inp.shape[0] // batch_size) * batch_size
    inp = inp[-num_samples:, :, :]
    op = op[-num_samples:, :, :]

    model = keras.models.Sequential()

    batch_input_shape = [batch_size, inp.shape[1], inp.shape[-1]]
    lstm1 = keras.layers.LSTM(
        16,
        use_bias=True,
        batch_input_shape=batch_input_shape,
        dropout=0.25,
        stateful=True,
        return_sequences=True,
    )
    model.add(lstm1)
    lstm2 = keras.layers.LSTM(
        16,
        use_bias=True,
        dropout=0.25,
        stateful=True,
        return_sequences=True,
    )
    model.add(lstm2)
    d1 = keras.layers.TimeDistributed(Dense(units=8, activation='tanh'))
    model.add(d1)
    d2 = keras.layers.TimeDistributed(Dense(units=1, activation='tanh'))
    model.add(d2)

    model.compile(optimizer=keras.optimizers.Adam(),
                  loss=keras.losses.MSE,
                  metrics=['mse'])

    # Train the stateful LSTM; shuffling stays off so the batch ordering is preserved

    history = model.fit(inp,
                        op,
                        epochs=lstm_epochs,
                        batch_size=batch_size,
                        shuffle=False,
                        verbose=False)

    t_loss = history.history['loss']

    # plt.figure()
    # plt.title('Training Loss', fontsize=20)
    # plt.ylabel('Mean Square Error', fontsize=20)
    # plt.plot(range(len(t_loss)), t_loss, 'r-')
    # plt.xlabel('Epochs', fontsize=20)
    # plt.yticks(np.arange(0, 2.2, 0.2))
    # # plt.show()
    model.save('model_1.h5')

    return np.mean(t_loss)
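The padding and reshaping logic above is easiest to follow with concrete numbers. The snippet below walks through it with made-up shapes (1003 windowed rows, lstm_time_step = 10, batch_size = 8), which are purely illustrative.

# Worked example of the pad / reshape / truncate arithmetic used above.
import numpy as np

rows, lstm_time_step, batch_size, inp_dim = 1003, 10, 8, 20   # illustrative values
inp = np.zeros((rows, inp_dim))
pad_len = (-inp.shape[0]) % lstm_time_step                    # 7 zero rows prepended
inp = np.concatenate([np.zeros((pad_len, inp_dim)), inp], axis=0)   # 1010 rows
inp = np.reshape(inp, [-1, lstm_time_step, inp_dim])          # (101, 10, 20)
num_samples = (inp.shape[0] // batch_size) * batch_size       # 96 sequences kept
inp = inp[-num_samples:, :, :]
print(inp.shape)                                              # (96, 10, 20)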
def val_test_model(val=False):

    # -------- #

    global lstm_time_step
    global batch_size
    global epochs
    global window_size

    # --------- #

    ae_model = load_model('ae_model.h5')
    lstm_model = load_model('model_1.h5')

    if val:
        _, _, X_test, _, _, Y_test, _ = data_feeder.get_data_val(True)
    else:
        _, X_test, _, Y_test, _ = data_feeder.get_data(True)

    # create windows
    inp, op = get_windowed_inp(Y_test, window_size)
    num_samples = inp.shape[0]
    print('num_samples', num_samples)

    X_test = X_test[-num_samples:, :]
    print('X_test', X_test.shape)
    # concatenate X_test with the exogenous input passed through the autoencoder
    ae_op = ae_model.predict(X_test)
    # print 'ae_op', ae_op.shape

    inp = np.concatenate([inp, ae_op], axis=-1)
    print('inp shape', inp.shape)

    # Reshape the data

    # pad_len is 0 when the length is already a multiple of lstm_time_step
    pad_len = (-inp.shape[0]) % lstm_time_step

    # add padding
    _inp_pad = np.zeros(shape=[pad_len, inp.shape[-1]])
    _op_pad = np.zeros(shape=[pad_len, op.shape[-1]])

    inp = np.concatenate([_inp_pad, inp], axis=0)
    op = np.concatenate([_op_pad, op], axis=0)
    inp_dim = inp.shape[-1]
    op_dim = op.shape[-1]

    inp = np.reshape(inp, [-1, lstm_time_step, inp_dim])
    op = np.reshape(op, [-1, lstm_time_step, op_dim])

    num_samples = (inp.shape[0] // batch_size) * batch_size
    inp = inp[-num_samples:, :, :]
    op = op[-num_samples:, :, :]

    # -------- #

    print('Shape input dimension ', inp.shape)
    print('Shape output dimension ', op.shape)

    test_x = inp
    test_y = op
    score = lstm_model.evaluate(x=test_x, y=test_y, batch_size=batch_size)
    print('Mean Square Error', score[0])
    return score[0]
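Assuming get_stacked_ae, create_complete_model and val_test_model live in the same module, the end-to-end run is just a few calls; this usage sketch only chains the functions defined above.

# Usage sketch: pretrain the autoencoder, train the LSTM, then score val and test.
get_stacked_ae()
train_loss = create_complete_model()        # None if model_1.h5 already existed
val_mse = val_test_model(val=True)
test_mse = val_test_model(val=False)
print('train loss', train_loss, 'val mse', val_mse, 'test mse', test_mse)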