def get_ae_2():
    global exog_lag
    global endo_lag
    ae_obj_1 = get_ae_1()

    X_train, _, Y_train, _, _ = data_feeder.get_data(True)

    xformed_exog_train = ae_obj_1.model.predict(X_train)
    _x1 = get_windowed_data(data=xformed_exog_train, window_size=exog_lag)
    _y1 = get_windowed_data(data=Y_train, window_size=endo_lag)

    num_samples = min(_x1.shape[0], _y1.shape[0])
    _x1 = _x1[-num_samples:]
    _y1 = _y1[-num_samples:]
    ae2_train_data = np.concatenate([_x1, _y1], axis=-1)
    ae_2_inp_dim = ae2_train_data.shape[-1]

    name = str(2) + '_' + str(exog_lag) + '_' + str(endo_lag)

    ae_obj_2 = ae_class(name)
    ae_obj_2.set_hyperparams(layer_units=ae_2_units,
                             inp_dim=ae_2_inp_dim,
                             batch_size=128,
                             epochs=ae_2_epoch)
    ae_obj_2.set_data(ae2_train_data)
    ae_2_losses = ae_obj_2.load_model()

    return ae_obj_1, ae_obj_2
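# NOTE: get_windowed_data comes from a utils module that is not shown here.
# A minimal sketch consistent with how it is called in this file (slide a
# window along the time axis and flatten each window into one row); the body
# below is an assumption, not the original implementation.
def _get_windowed_data_sketch(data, window_size):
    data = np.asarray(data)
    n = data.shape[0] - window_size + 1
    return np.asarray([data[i:i + window_size].reshape(-1) for i in range(n)])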
def build_train_model():

    global endo_lag
    global exog_lag
    global lstm_units
    global lstm_batch_size

    X_train, _, Y_train, _, _ = data_feeder.get_data(True)

    ae_1_obj, ae_2_obj = get_ae_2()
    xformed_exog_train = ae_1_obj.model.predict(X_train)

    _x1 = get_windowed_data(data=xformed_exog_train, window_size=exog_lag)
    _x2 = get_windowed_data(data=Y_train, window_size=endo_lag + 1)
    # take the step after the window as the target, then trim the inputs
    _y = _x2[:, -1:]
    _x2 = _x2[:, 0:endo_lag]

    num_samples = min(_x1.shape[0], _x2.shape[0])
    _x1 = _x1[-num_samples:]
    _x2 = _x2[-num_samples:]
    _y = _y[-num_samples:, :]

    ae2_inp = np.concatenate([_x1, _x2], axis=-1)
    x = ae_2_obj.model.predict(ae2_inp)

    name = str(exog_lag) + '_' + str(endo_lag)
    lstm_model_obj = lstm_model(name)
    lstm_model_obj.set_hyperparams(lstm_units=lstm_units,
                                   time_step=lstm_time_steps,
                                   epochs=lstm_epochs,
                                   batch_size=lstm_batch_size)

    lstm_model_obj.set_train_data(x, _y)
    train_mse = lstm_model_obj.load_model()
    return ae_1_obj, ae_2_obj, lstm_model_obj, train_mse
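# A hypothetical driver, assuming exog_lag, endo_lag and the lstm_* globals
# are configured at module level (as the `global` statements above imply):
if __name__ == '__main__':
    ae_1, ae_2, lstm_obj, train_mse = build_train_model()
    print 'LSTM training MSE', train_mse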
    def train_model(self):

        exog_train, _, end_train, _, _ = data_feeder.get_data(True)
        x1, x2, y = self.format_data_xy(exog_train, end_train)
        hist = self.model.fit([x1, x2], y, epochs=self.epochs)
        print hist.history['loss']
        # self.model.save(self.model_file)
        return
def ctdnn_get_data(window_size, type='train'):
    exog_train, exog_test, end_train, end_test, scaler_array = data_feeder.get_data(True)

    if type == 'train':
        train_x_exog, train_x_end, train_y = ctdnn_get_data_aux(exog_train, end_train, window_size)
        return train_x_exog, train_x_end, train_y
    else:
        test_x_exog, test_x_end, test_y = ctdnn_get_data_aux(exog_test, end_test, window_size)
        return test_x_exog, test_x_end, test_y
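# ctdnn_get_data_aux is defined elsewhere; a plausible sketch, assuming it
# windows both series and uses the step after each window as the label
# (the alignment and names below are assumptions):
def _ctdnn_get_data_aux_sketch(exog, endo, window_size):
    endo = np.reshape(np.asarray(endo), [-1, 1])
    x_exog = get_windowed_data(data=exog, window_size=window_size)
    res = get_windowed_data(data=endo, window_size=window_size + 1)
    x_end, y = res[:, 0:window_size], res[:, -1:]
    n = min(x_exog.shape[0], x_end.shape[0])
    return x_exog[-n:], x_end[-n:], y[-n:]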
def FTDNN():

    global target_window
    batch_size = 256

    X_train, X_test, Y_train, Y_test, scaler_array = data_feeder.get_data(True)

    # print X_train.shape
    # print Y_train.shape
    train_windowed_data = get_windowed_data_ex(Y_train, target_window + 1)
    test_windowed_data = get_windowed_data_ex(Y_test, target_window + 1)
    print train_windowed_data.shape
    # Set up training input and output
    x_train = []
    y_train = []

    for i in range(train_windowed_data.shape[0]):
        x_train.append(train_windowed_data[i, 0:target_window])
        y_train.append(train_windowed_data[i, -1:])
    x_train = np.asarray(x_train)
    x_train = np.reshape(x_train, [x_train.shape[0], x_train.shape[1]])
    y_train = np.asarray(y_train)
    y_train = np.reshape(y_train, [y_train.shape[0], y_train.shape[1]])

    print x_train.shape
    print y_train.shape

    x_test = []
    y_test = []

    for i in range(test_windowed_data.shape[0]):
        x_test.append(test_windowed_data[i, 0:target_window])
        y_test.append(test_windowed_data[i, -1:])

    x_test = np.asarray(x_test)
    x_test = np.reshape(x_test, [x_test.shape[0], x_test.shape[1]])
    y_test = np.asarray(y_test)
    y_test = np.reshape(y_test, [y_test.shape[0], y_test.shape[1]])

    print x_test.shape
    print y_test.shape

    model = FTDNN_model(target_window)
    history = model.fit(x_train, y_train, epochs=2, batch_size=batch_size)
    train_loss = history.history['loss']
    print train_loss
    # Plot Training Loss
    # _plot(range(len(train_loss)), train_loss,
    #        ' Training Loss in Focused TDNN with target series input only. Batch size of 128')
    score = model.evaluate(x_test, y_test, batch_size=128)
    print score
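# FTDNN_model is defined elsewhere in the repo; a minimal sketch, assuming a
# plain feed-forward net over the target_window lagged inputs (the layer
# sizes here are illustrative, not the original ones):
def _FTDNN_model_sketch(target_window):
    model = keras.models.Sequential()
    model.add(Dense(32, input_dim=target_window, activation='tanh'))
    model.add(Dense(1))
    model.compile(optimizer=keras.optimizers.Adam(), loss=keras.losses.MSE)
    return model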
def get_stacked_ae():

    global ae_epochs
    global batch_size
    if os.path.exists('ae_model.h5'):
        return keras.models.load_model('ae_model.h5')

    nb_epoch = ae_epochs
    X_train, _, _, _, _ = data_feeder.get_data(True)

    num_layers = 3
    shape = [64, 32, 16]
    inp_dim = X_train.shape[-1]

    model = keras.models.Sequential()

    # input_layer = keras.layers.Dense(units=inp_dim,
    #                                  input_dim=inp_dim,
    #                                  activation=None,
    #                                  use_bias=False,
    #                                  activity_regularizer=None)
    # model.add(input_layer)

    # Greedy layer-wise pretraining: for each encoder layer, attach a
    # temporary decoder back to the input dimension, train on the
    # reconstruction task, then freeze everything and pop the decoder.
    for i in range(num_layers):
        if i == 0:
            enc_layer = Dense(shape[i], input_dim=inp_dim, activation='sigmoid')
        else:
            enc_layer = Dense(shape[i], activation='tanh')

        model.add(enc_layer)
        dec_layer = Dense(units=inp_dim, activation='tanh')
        model.add(dec_layer)

        model.compile(optimizer=keras.optimizers.Adam(),
                      loss=keras.losses.MSE,
                      metrics=['accuracy'])

        model.fit(X_train,
                  X_train,
                  epochs=nb_epoch,
                  batch_size=batch_size,
                  shuffle=False)

        # Freeze what has been trained so far and discard the decoder layer.
        for layer in model.layers:
            layer.trainable = False

        model.pop()
    model.save('ae_model.h5')
    return model
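# Usage sketch for the stacked encoder above: after the loop every temporary
# decoder has been popped, so the saved model is the frozen 64-32-16 encoder.
def _encode_exog_sketch():
    encoder = get_stacked_ae()  # trains, or reloads 'ae_model.h5' if cached
    X_train, _, _, _, _ = data_feeder.get_data(True)
    return encoder.predict(X_train)  # shape: [num_samples, 16]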
def get_ae_1():

    X_train, _, Y_train, _, _ = data_feeder.get_data(True)

    # set up autoencoder for exogenous values
    ae_obj_1 = ae_class(1)

    ae_obj_1.set_hyperparams(layer_units=ae_1_units,
                             inp_dim=X_train.shape[-1],
                             batch_size=128,
                             epochs=ae_1_epoch)

    ae_obj_1.set_data(X_train)
    ae_1_losses = ae_obj_1.load_model()
    return ae_obj_1
def get_data(time_window, type):

    # window of training data = time_window
    X_train, X_test, Y_train, Y_test, _ = data_feeder.get_data(std=True)

    if type == 'test':
        y = Y_test
    else:
        y = Y_train

    y = np.asanyarray(y)
    y = np.reshape(y, [-1, 1])
    res = utils.get_windowed_data(y, time_window + 1)
    x = res[:, 0:time_window]
    y = res[:, -1:]
    x = np.reshape(x, [x.shape[0], time_window, 1])
    y = np.reshape(y, [y.shape[0], 1])
    return x, y
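# Usage sketch: 12-step windows of the target series, shaped for an LSTM
# (the window length 12 is arbitrary here):
x_demo, y_demo = get_data(time_window=12, type='train')
print x_demo.shape  # (n_samples, 12, 1)
print y_demo.shape  # (n_samples, 1)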
def get_train_data():
    global ae_obj
    global window_size

    X_train, _, Y_train, _, _ = data_feeder.get_data(True)

    # set up train data
    ae_obj, X_train_ae = pretrain_ae(X_train)
    train_y_inp, train_y_op = get_windowed_y(Y_train, window_size=window_size)
    train_exog_inp = X_train_ae[window_size:, :]
    print 'train_exog_inp ', train_exog_inp.shape
    # concatenate train_y_inp and train_exog_inp
    train_data_x = np.concatenate([train_y_inp, train_exog_inp], axis=1)
    print 'Train Data'
    print 'train_data_x ', train_data_x.shape
    print 'train_y_op ', train_y_op.shape
    return train_data_x, train_y_op
def test_model():

    ae_model = load_model('ae_model.h5')
    model = load_model('model_1.h5')

    _, X_test, _, Y_test, scaler_array = data_feeder.get_data(True)

    window_size = 8
    # create windows
    inp, op = get_windowed_inp(Y_test, window_size)

    num_samples = inp.shape[0]
    X_test = X_test[-num_samples:, :]
    ae_op = ae_model.predict(X_test)
    inp = np.concatenate([inp, ae_op], axis=1)

    # Reshape the data
    lstm_batch_size = 128
    pad_len = lstm_batch_size - (inp.shape[0] % lstm_batch_size)

    # add padding
    _inp_pad = np.zeros(shape=[pad_len, inp.shape[-1]])
    _op_pad = np.zeros(shape=[pad_len, op.shape[-1]])

    inp = np.concatenate([_inp_pad, inp], axis=0)
    op = np.concatenate([_op_pad, op], axis=0)
    inp_dim = inp.shape[-1]
    op_dim = op.shape[-1]

    # print 'Shape input dimension ', inp_dim
    # print 'Shape output dimension ', op_dim

    inp = np.reshape(inp, [-1, lstm_batch_size, inp_dim])
    op = np.reshape(op, [-1, lstm_batch_size, op_dim])

    print 'Input shape ', inp.shape
    print 'Output shape ', op.shape

    test_x = inp
    test_y = op
    score = model.evaluate(x=test_x, y=test_y, batch_size=1)
    print score
    return
def get_training_data(time_window, endog_only=True):
    # window of training data = time_window
    _, _, z, _, _ = data_feeder.get_data(std=True)

    if endog_only:
        z = np.asanyarray(z)
        print '>>', z.shape
        z = np.reshape(z, [-1, 1])

        print z.shape
        # window of length time_window + 1: the first time_window steps are
        # the input, the step after them is the prediction target
        res = utils.get_windowed_data(z, time_window + 1)
        print ' >>> ', res.shape
        _x = res[:, -1:]
        _y = res[:, 0:time_window]
        print ' >>> ', _x.shape, _y.shape
        _x = np.reshape(_x, [_x.shape[0], 1, 1])
        _y = np.reshape(_y, [_y.shape[0], time_window, 1])
        return _y, _x
    else:
        pass

    return
def get_training_data(time_window):
    # window of training data = time_window
    # X : exogenous series
    # Y : target series

    exog_train, exog_test, end_train, end_test, _ = data_feeder.get_data(
        std=True)
    exog_dim = exog_train.shape[-1]
    res = utils.get_windowed_data(exog_train, time_window)
    res = np.reshape(res, [-1, time_window, 1, exog_dim])
    # time_window, 1, exog_dim
    exog_train_x = res

    res = utils.get_windowed_data(end_train, time_window)
    res = np.reshape(res, [-1, time_window, 1])
    end_train_x = res

    # one-step-ahead target: drop the last window (it has no following step)
    # and pair each remaining window with the value that comes right after it
    n_samples = res.shape[0] - 1
    exog_train_x = exog_train_x[:n_samples]
    end_train_x = end_train_x[:n_samples]

    res = utils.get_windowed_data(end_train, 1)
    res = res[-n_samples:, :]
    res = np.reshape(res, [-1, 1, 1])
    end_train_y = res

    return exog_train_x, end_train_x, end_train_y
def create_complete_model():

    ae_model = load_model('ae_model.h5')
    X_train, X_test, Y_train, Y_test, scaler_array = data_feeder.get_data(True)

    window_size = 8
    # create windows
    inp, op = get_windowed_inp(Y_train, window_size)
    num_samples = inp.shape[0]
    print ' num samples', num_samples

    X_train = X_train[-num_samples:, :]
    # concatenate X_train and input passed through ae
    ae_op = ae_model.predict(X_train)
    print 'Shape of op from Auto encoder', ae_op.shape
    inp = np.concatenate([inp, ae_op], axis=1)

    # Reshape the data
    lstm_batch_size = 128
    pad_len = lstm_batch_size - (inp.shape[0] % lstm_batch_size)

    # add padding
    _inp_pad = np.zeros(shape=[pad_len, inp.shape[-1]])
    _op_pad = np.zeros(shape=[pad_len, op.shape[-1]])

    inp = np.concatenate([_inp_pad, inp], axis=0)
    op = np.concatenate([_op_pad, op], axis=0)
    inp_dim = inp.shape[-1]
    op_dim = op.shape[-1]

    inp = np.reshape(inp, [-1, lstm_batch_size, inp_dim])
    op = np.reshape(op, [-1, lstm_batch_size, op_dim])

    model = keras.models.Sequential()

    epochs = 100
    batch_input_shape = [1, inp.shape[1], inp.shape[-1]]
    lstm1 = keras.layers.LSTM(
        128,
        use_bias=True,
        batch_input_shape=batch_input_shape,
        dropout=0.25,
        stateful=True,
        return_sequences=True,
    )
    model.add(lstm1)
    lstm2 = keras.layers.LSTM(
        64,
        use_bias=True,
        dropout=0.25,
        stateful=True,
        return_sequences=True,
    )
    model.add(lstm2)
    d1 = Dense(units=64, activation='tanh')
    model.add(d1)
    d2 = Dense(units=1, activation='tanh')
    model.add(d2)

    model.compile(optimizer=keras.optimizers.Adam(),
                  loss=keras.losses.MSE,
                  metrics=['mse'])

    history = model.fit(inp, op, epochs=epochs, batch_size=1, shuffle=False)

    model.summary()

    t_loss = history.history['loss']
    plt.figure()
    plt.title('Training Loss', fontsize=20)
    plt.ylabel('Mean Square Error', fontsize=20)
    plt.plot(range(len(t_loss)), t_loss, 'r-')
    plt.xlabel('Epochs', fontsize=20)
    plt.yticks(np.arange(0, 2.2, 0.2))
    plt.show()
    model.save('model_1.h5')
    return
    def test_model(self):
        _, exog_test, _, end_test, _ = data_feeder.get_data(True)
        x1, x2, y = self.format_data_xy(exog_test, end_test)
        print self.model.evaluate([x1, x2], y)
def val_test_model(val=False):

    # -------- #

    global lstm_time_step
    global batch_size
    global epochs
    global window_size

    # --------- #

    ae_model = load_model('ae_model.h5')
    lstm_model = load_model('model_1.h5')

    if val:
        _, _, X_test, _, _, Y_test, _ = data_feeder.get_data_val(True)
    else:
        _, X_test, _, Y_test, _ = data_feeder.get_data(True)

    # create windows
    inp, op = get_windowed_inp(Y_test, window_size)
    num_samples = inp.shape[0]
    print 'num_samples', num_samples

    X_test = X_test[-num_samples:, :]
    print 'X_test', X_test.shape
    # concatenate X_train and input passed through ae
    ae_op = ae_model.predict(X_test)
    # print 'ae_op', ae_op.shape

    inp = np.concatenate([inp, ae_op], axis=-1)
    print 'inp shape', inp.shape

    # Reshape the data

    pad_len = lstm_time_step - (inp.shape[0] % lstm_time_step)

    # add padding
    _inp_pad = np.zeros(shape=[pad_len, inp.shape[-1]])
    _op_pad = np.zeros(shape=[pad_len, op.shape[-1]])

    inp = np.concatenate([_inp_pad, inp], axis=0)
    op = np.concatenate([_op_pad, op], axis=0)
    inp_dim = inp.shape[-1]
    op_dim = op.shape[-1]

    inp = np.reshape(inp, [-1, lstm_time_step, inp_dim])
    op = np.reshape(op, [-1, lstm_time_step, op_dim])

    num_samples = (inp.shape[0] // batch_size) * batch_size
    inp = inp[-num_samples:, :, :]
    op = op[-num_samples:, :, :]

    # -------- #

    print 'Input shape ', inp.shape
    print 'Output shape ', op.shape

    test_x = inp
    test_y = op
    score = lstm_model.evaluate(x=test_x, y=test_y, batch_size=batch_size)
    print 'Mean Square Error', score[0]
    return score[0]

def pretrain_ae(X_train):

    data_x = X_train
    ae_obj = ae()
    ae_obj.set_data(data_x)
    ae_obj.layerwise_train()
    ae_op = ae_obj.reduce(X_train)
    print ae_op.shape
    return ae_obj, ae_op


# --------------------------------------------------------------------------------------- #

X_train, X_test, Y_train, Y_test, scaler_array = data_feeder.get_data(True)

# ----------------- #
# Create sliding window of target value #
# ----------------- #


from itertools import tee, izip


def get_windowed_y(data, window_size):
    y = list(np.reshape(data, [data.shape[0]]))
    print len(y)

    # local function: slide a window of `size` consecutive rows over iterable
    def window(iterable, size):
        iters = tee(iterable, size)
        for i in xrange(1, size):
            for each in iters[i:]:
                next(each, None)
        return izip(*iters)

    op = []
    for w in window(data, window_size):
        w = np.reshape(w, [-1])
        op.append(w)

    op = np.asarray(op)
    return op

def create_complete_model():
    # ------------ #

    global lstm_time_step
    global batch_size
    global lstm_epochs
    global window_size

    # ------------ #

    if os.path.exists('model_1.h5'):
        return

    ae_model = load_model('ae_model.h5')
    X_train, _, Y_train, _, _ = data_feeder.get_data(True)

    # create windows
    inp, op = get_windowed_inp(Y_train, window_size)
    num_samples = inp.shape[0]
    # print 'num_samples', num_samples

    X_train = X_train[-num_samples:, :]
    # print 'X_train', X_train.shape
    # concatenate X_train and input passed through ae
    ae_op = ae_model.predict(X_train)
    # print 'ae_op', ae_op.shape

    inp = np.concatenate([inp, ae_op], axis=-1)
    # print 'inp shape', inp.shape

    # Reshape the data

    pad_len = lstm_time_step - (inp.shape[0] % lstm_time_step)

    # add padding
    _inp_pad = np.zeros(shape=[pad_len, inp.shape[-1]])
    _op_pad = np.zeros(shape=[pad_len, op.shape[-1]])

    inp = np.concatenate([_inp_pad, inp], axis=0)
    op = np.concatenate([_op_pad, op], axis=0)
    inp_dim = inp.shape[-1]
    op_dim = op.shape[-1]

    inp = np.reshape(inp, [-1, lstm_time_step, inp_dim])
    op = np.reshape(op, [-1, lstm_time_step, op_dim])

    # Ensure number of samples are divisible by batch size

    num_samples = (inp.shape[0] // batch_size) * batch_size
    inp = inp[-num_samples:, :, :]
    op = op[-num_samples:, :, :]

    model = keras.models.Sequential()

    batch_input_shape = [batch_size, inp.shape[1], inp.shape[-1]]
    lstm1 = keras.layers.LSTM(
        16,
        use_bias=True,
        batch_input_shape=batch_input_shape,
        dropout=0.25,
        stateful=True,
        return_sequences=True,
    )
    model.add(lstm1)
    lstm2 = keras.layers.LSTM(
        16,
        use_bias=True,
        dropout=0.25,
        stateful=True,
        return_sequences=True,
    )
    model.add(lstm2)
    d1 = keras.layers.TimeDistributed(Dense(units=8, activation='tanh'))
    model.add(d1)
    d2 = keras.layers.TimeDistributed(Dense(units=1, activation='tanh'))
    model.add(d2)

    model.compile(optimizer=keras.optimizers.Adam(),
                  loss=keras.losses.MSE,
                  metrics=['mse'])

    # Ensure number of samples divisible by batch size

    history = model.fit(inp,
                        op,
                        epochs=lstm_epochs,
                        batch_size=batch_size,
                        shuffle=False,
                        verbose=False)

    t_loss = history.history['loss']

    # plt.figure()
    # plt.title('Training Loss', fontsize=20)
    # plt.ylabel('Mean Square Error', fontsize=20)
    # plt.plot(range(len(t_loss)), t_loss, 'r-')
    # plt.xlabel('Epochs', fontsize=20)
    # plt.yticks(np.arange(0, 2.2, 0.2))
    # # plt.show()
    model.save('model_1.h5')

    return np.mean(t_loss)
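# Sanity-check sketch: the itertools-based window above should agree with
# plain NumPy slicing on a toy series:
_toy = np.arange(10).reshape([-1, 1])
_w1 = get_windowed_y(_toy, 3)
_w2 = np.asarray([_toy[i:i + 3].reshape(-1) for i in range(len(_toy) - 2)])
assert np.array_equal(_w1, _w2)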


# ------ #
# Get Data
# ------ #
X_train, _, Y_train, _, _ = data_feeder.get_data(True)

# set up autoencoder for exogenous values
ae_obj_1 = ae_class(1)
ae_obj_1.set_hyperparams(layer_units=[64, 32, 16],
                         inp_dim=X_train.shape[-1],
                         batch_size=512,
                         epochs=ae_1_epoch)

print 'Shape of training data for 1st auto encoder', X_train.shape
ae_obj_1.set_data(X_train)
ae_1_losses, ae_1_acc = ae_obj_1.load_model()
print 'Auto Encoder 1'
# ae_obj_1.model.summary()
# ------ #