Example #1
def make_plots(context, predictions_timesteps, true_values, look_ahead, title,
               path, save_figure, Xserver):
    step = 1
    if look_ahead > 1:
        step = look_ahead - 1
    for idx, i in enumerate(np.arange(0, look_ahead, step)):
        fig = plt.figure()
        #plt.title(title+" Timestep: %d "%i)
        plt.xlabel("Time step")
        plt.ylabel("Power Consumption")
        plt.plot(true_values,
                 label="True value",
                 linewidth=1,
                 color=sns.xkcd_rgb["denim blue"])
        plt.plot(predictions_timesteps[:, i],
                 label="Predicted value",
                 linewidth=1,
                 linestyle="--",
                 color=sns.xkcd_rgb["medium green"])
        error = abs(true_values - predictions_timesteps[:, i])
        plt.plot(error,
                 label='Error',
                 color=sns.xkcd_rgb["pale red"],
                 linewidth=0.5)
        plt.legend(bbox_to_anchor=(1, .99))
        plt.tight_layout()
        if save_figure:
            util.save_figure(path, "%s_timestep_%d" % (context, i), fig)

    if Xserver:
        plt.show()
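
A minimal driver for the variant above might look like the following; this is a sketch with synthetic data, and the shapes are assumptions inferred from how make_plots indexes predictions_timesteps[:, i]:

# Hypothetical driver; the series, noise level, and path below are
# illustrative assumptions, not values from the original project.
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns

look_ahead = 5
n = 200
true_values = np.sin(np.linspace(0, 8 * np.pi, n))        # (n,) series
# one prediction column per look-ahead step: shape (n, look_ahead)
predictions_timesteps = true_values[:, None] + 0.1 * np.random.randn(n, look_ahead)

make_plots(context="demo", predictions_timesteps=predictions_timesteps,
           true_values=true_values, look_ahead=look_ahead, title="Demo",
           path="imgs/demo/", save_figure=False, Xserver=True)
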
Example #2
def make_plots(context, predictions_timesteps, true_values, look_ahead, title,
               path, save_figure):
    step = 1
    if look_ahead > 1:
        step = look_ahead - 1
    for idx, i in enumerate(np.arange(0, look_ahead, step)):
        fig = plt.figure()
        plt.title(" Timestep: %d "%i)
        plt.xlabel("Time Step")
        plt.ylabel("Power Consumption")
        plt.plot(true_values, label="actual", linewidth=1)
        plt.plot(predictions_timesteps[:, i], label="prediction", linewidth=1, linestyle="--")
        error = abs(true_values - predictions_timesteps[:, i])
        plt.plot(error, label="error", linewidth=0.5)
        plt.legend()
        plt.tight_layout()
        if save_figure:
            util.save_figure(path,"%s_timestep_%d"%(context,i), fig)
        plt.show()
Example #3
def make_plots(context, predictions_timesteps, true_values, look_ahead, title,
               path, save_figure, Xserver):
    step = 1
    if look_ahead > 1:
        step = look_ahead - 1
    for idx, i in enumerate(np.arange(0, look_ahead, step)):
        fig = plt.figure()
        #plt.title(title+" Timestep: %d "%i)
        plt.xlabel("Time Step")
        plt.ylabel("Power Consumption")
        plt.plot(true_values, label="actual", linewidth=1)
        plt.plot(predictions_timesteps[:, i], label="prediction", linewidth=1, linestyle="--")
        error = abs(true_values - predictions_timesteps[:, i])
        plt.plot(error, label="error", linewidth=0.5)
        plt.legend()
        plt.tight_layout()
        if save_figure:
            util.save_figure(path,"%s_timestep_%d"%(context,i), fig)

    if Xserver:
        plt.show()
Example #4
def make_plots(context, predictions_timesteps, true_values, look_ahead, title, path, save_figure, Xserver):
    step = 1
    if look_ahead > 1:
        step = look_ahead - 1
    for idx, i in enumerate(np.arange(0, look_ahead, step)):
        fig = plt.figure()
        #plt.title(title+" Timestep: %d "%i)
        plt.xlabel("Time step")
        plt.ylabel("Power Consumption")
        plt.plot(true_values, label="True value", linewidth=1,color=sns.xkcd_rgb["denim blue"])
        plt.plot(predictions_timesteps[:, i], label="Predicted value", linewidth=1, linestyle="--",color=sns.xkcd_rgb["medium green"])
        error = abs(true_values - predictions_timesteps[:, i])
        plt.plot(error, label='Error',color=sns.xkcd_rgb["pale red"], linewidth=0.5)
        plt.legend(bbox_to_anchor=(1, .99))
        plt.tight_layout()
        if save_figure:
            util.save_figure(path,"%s_timestep_%d"%(context,i), fig)

    if Xserver:
        plt.show()
Example #5
def run():
    print("************* RUNNING LSTM PREDICTOR *************")
    # load config settings
    experiment_id = cfg.run_config['experiment_id']
    data_folder = cfg.run_config['data_folder']
    look_back = cfg.multi_step_lstm_config['look_back']
    # look_back = lookback
    look_ahead = cfg.multi_step_lstm_config['look_ahead']
    # look_ahead = lookahead
    batch_size = cfg.multi_step_lstm_config['batch_size']
    epochs = cfg.multi_step_lstm_config['n_epochs']
    # epochs = epoch
    dropout = cfg.multi_step_lstm_config['dropout']
    layers = cfg.multi_step_lstm_config['layers']
    loss = cfg.multi_step_lstm_config['loss']
    # optimizer = cfg.multi_step_lstm_config['optimizer']
    shuffle = cfg.multi_step_lstm_config['shuffle']
    patience = cfg.multi_step_lstm_config['patience']
    validation = cfg.multi_step_lstm_config['validation']
    learning_rate = cfg.multi_step_lstm_config['learning_rate']
    logging.info(
        "--------------------------Start Running--------------------------")
    logging.info('Run id %s' % (experiment_id))
    logging.info(" HYPERPRAMRAMS : %s" % (str(locals())))

    train_scaler, X_train, y_train, X_validation1, y_validation1, X_validation2, y_validation2, validation2_labels, \
        X_test, y_test, test_labels = util.load_data(
            data_folder, look_back, look_ahead)

    multistep_lstm = lstm.MultiStepLSTM(look_back=look_back, look_ahead=look_ahead,
                                        layers=layers,
                                        dropout=dropout, loss=loss, learning_rate=learning_rate)
    model = multistep_lstm.build_model()
    # if cfg.run_config['save_figure']:
    #     plot_model(model, to_file="imgs/%s_lstm.png" %
    #                (experiment_id), show_shapes=True, show_layer_names=True)
    # train model on training set. validation1 set is used for early stopping

    fig = plt.figure()
    history = lstm.train_model(model, X_train, y_train, batch_size, epochs,
                               shuffle, validation, (X_validation1, y_validation1), patience)
    plt.plot(history.history['loss'])
    plt.plot(history.history['val_loss'])
    plt.ylabel('Loss')
    plt.xlabel('Epoch')
    plt.legend(['Train', 'Test'], loc='upper left')
    # TODO: Show Plot!
    # plt.show()
    if cfg.run_config['save_figure']:
        util.save_figure("%s/%s/" %
                         ("imgs", experiment_id), "train_errors", fig)

    validation2_loss = model.evaluate(
        X_validation2, y_validation2, batch_size=batch_size, verbose=2)
    print "Validation2 Loss %s" % (validation2_loss)
    logging.info("Validation2 Loss %s" % (validation2_loss))
    test_loss = model.evaluate(
        X_test, y_test, batch_size=batch_size, verbose=2)
    print "Test Loss %s" % (test_loss)
    logging.info("Test Loss %s" % (test_loss))

    print "----------- Training finished, start making predictions -----------"
    logging.info(
        "----------- Training finished, start making predictions -----------")

    predictions_train, y_true_train = get_predictions("Train", model, X_train, y_train, train_scaler,
                                                      batch_size, look_ahead, look_back, epochs, experiment_id,
                                                      )
    np.save(data_folder + "train_predictions", predictions_train)
    np.save(data_folder + "train_true", y_true_train)

    predictions_validation1, y_true_validation1 = get_predictions("Validation1", model, X_validation1, y_validation1,
                                                                  train_scaler, batch_size, look_ahead, look_back,
                                                                  epochs, experiment_id,
                                                                  )
    predictions_validation1_scaled = train_scaler.transform(
        predictions_validation1)
    print "Calculated validation1 loss %f" % (mean_squared_error(
        np.reshape(y_validation1, [len(y_validation1), look_ahead]),
        np.reshape(predictions_validation1_scaled, [len(predictions_validation1_scaled), look_ahead])))
    np.save(data_folder + "validation1_predictions", predictions_validation1)
    np.save(data_folder + "validation1_true", y_true_validation1)
    np.save(data_folder + "validation1_labels", validation2_labels)

    predictions_validation2, y_true_validation2 = get_predictions("Validation2", model, X_validation2, y_validation2,
                                                                  train_scaler, batch_size, look_ahead, look_back,
                                                                  epochs, experiment_id,
                                                                  )
    predictions_validation2_scaled = train_scaler.transform(
        predictions_validation2)
    print "Calculated validation2 loss %f" % (mean_squared_error(
        np.reshape(y_validation2, [len(y_validation2), look_ahead]),
        np.reshape(predictions_validation2_scaled, [len(predictions_validation2_scaled), look_ahead])))
    np.save(data_folder + "validation2_predictions", predictions_validation2)
    np.save(data_folder + "validation2_true", y_true_validation2)
    np.save(data_folder + "validation2_labels", validation2_labels)

    predictions_test, y_true_test = get_predictions("Test", model, X_test, y_test, train_scaler, batch_size, look_ahead,
                                                    look_back, epochs, experiment_id,
                                                    )
    predictions_test_scaled = train_scaler.transform(predictions_test)
    print "Calculated test loss %f" % (mean_squared_error(np.reshape(y_test, [len(y_test), look_ahead]),
                                                          np.reshape(predictions_test_scaled, [len(predictions_test_scaled), look_ahead])))

    np.save(data_folder + "test_predictions", predictions_test)
    np.save(data_folder + "test_true", y_true_test)
    np.save(data_folder + "test_labels", test_labels)

    logging.info(
        "---------------------------------------------------------------------")
    logging.info("Validation2 Loss %s" % (validation2_loss))
    logging.info("Test Loss %s" % (test_loss))
    logging.info(
        "------------------------------ run complete ------------------------------")
    logging.info("")
Example #6
def run():
    #load config settings
    experiment_id = cfg.run_config['experiment_id']
    data_folder = cfg.run_config['data_folder']
    look_back = cfg.multi_step_lstm_config['look_back']
    look_ahead = cfg.multi_step_lstm_config['look_ahead']
    batch_size = cfg.multi_step_lstm_config['batch_size'] - (look_back + look_ahead) + 1
    epochs = cfg.multi_step_lstm_config['n_epochs']
    dropout = cfg.multi_step_lstm_config['dropout']
    layers = cfg.multi_step_lstm_config['layers']
    loss = cfg.multi_step_lstm_config['loss']
    # optimizer = cfg.multi_step_lstm_config['optimizer']
    shuffle = cfg.multi_step_lstm_config['shuffle']
    patience = cfg.multi_step_lstm_config['patience']
    validation = cfg.multi_step_lstm_config['validation']
    learning_rate = cfg.multi_step_lstm_config['learning_rate']
    logging.info("----------------------------------------------------")
    logging.info('Run id %s' % (experiment_id))

    logging.info(" HYPERPRAMRAMS : %s" % (str(locals())))

    train_scaler, X_train, y_train, X_validation1, y_validation1, X_validation2, y_validation2, validation2_labels, \
    X_test, y_test, test_labels = util.load_data(data_folder, look_back, look_ahead)

    # For a stateful LSTM the batch_size needs to be fixed beforehand.
    # We also need to ensure that all batches have the same number of samples,
    # so we drop the trailing samples that do not fill a complete batch.
    if batch_size > 1:
        n_train_batches = len(X_train) // batch_size
        len_train = n_train_batches * batch_size
        if len_train < len(X_train):
            X_train = X_train[:len_train]
            y_train = y_train[:len_train]

        n_validation1_batches = len(X_validation1) // batch_size
        len_validation1 = n_validation1_batches * batch_size
        if n_validation1_batches * batch_size < len(X_validation1):
            X_validation1 = X_validation1[:len_validation1]
            y_validation1 = y_validation1[:len_validation1]

        n_validation2_batches = len(X_validation2) // batch_size
        len_validation2 = n_validation2_batches * batch_size
        if n_validation2_batches * batch_size < len(X_validation2):
            X_validation2 = X_validation2[:len_validation2]
            y_validation2 = y_validation2[:len_validation2]

        n_test_batches = len(X_test) // batch_size
        len_test = n_test_batches * batch_size
        if n_test_batches * batch_size < len(X_test):
            X_test = X_test[:len_test]
            y_test = y_test[:len_test]

    stateful_lstm = lstm.StatefulMultiStepLSTM(batch_size=batch_size, look_back=look_back, look_ahead=look_ahead,
                                               layers=layers,
                                               dropout=dropout, loss=loss, learning_rate=learning_rate)
    model = stateful_lstm.build_model()
    if cfg.run_config['save_figure']:
        plot_model(model, to_file="imgs/%s_stateful_lstm.png"%(experiment_id), show_shapes=True, show_layer_names=True)
    # train model on training set. validation1 set is used for early stopping
    history = lstm.train_stateful_model(model, X_train, y_train, batch_size, epochs, shuffle, validation, (X_validation1, y_validation1),
                     patience)
    fig = plt.figure()
    plt.plot(history.history['loss'])
    plt.plot(history.history['val_loss'])
    plt.ylabel('Loss')
    plt.xlabel('Epoch')
    plt.legend(['Train', 'Test'], loc='upper left')
    plt.show()
    if cfg.run_config['save_figure']:
        util.save_figure("%s/%s/"%("imgs",experiment_id), "train_errors" , fig)

    validation2_loss = model.evaluate(X_validation2, y_validation2, batch_size=batch_size, verbose=2)
    print("Validation2 Loss %s" % (validation2_loss))
    logging.info("Validation2 Loss %s" % (validation2_loss))
    test_loss = model.evaluate(X_test, y_test, batch_size=batch_size, verbose=2)
    print("Test Loss %s" % (test_loss))
    logging.info("Test Loss %s" % (test_loss))

    predictions_train, y_true_train = get_predictions("Train", model, X_train, y_train, train_scaler,
                                                               batch_size, look_ahead, look_back, epochs, experiment_id,
                                                               )
    np.save(data_folder + "train_predictions", predictions_train)
    np.save(data_folder + "train_true",y_true_train)


    predictions_validation1, y_true_validation1 = get_predictions("Validation1", model, X_validation1, y_validation1,
                                                                  train_scaler, batch_size, look_ahead, look_back,
                                                                  epochs, experiment_id,
                                                                  )
    predictions_validation1_scaled = train_scaler.transform(predictions_validation1)
    print("Calculated validation1 loss %f" % (mean_squared_error(
        np.reshape(y_validation1, [len(y_validation1), look_ahead]),
        np.reshape(predictions_validation1_scaled, [len(predictions_validation1_scaled), look_ahead]))))

    np.save(data_folder + "validation1_predictions", predictions_validation1)
    np.save(data_folder + "validation1_true", y_true_validation1)



    predictions_validation2, y_true_validation2 = get_predictions("Validation2", model, X_validation2, y_validation2,
                                                                  train_scaler, batch_size, look_ahead, look_back,
                                                                  epochs, experiment_id,
                                                                 )
    predictions_validation2_scaled = train_scaler.transform(predictions_validation2)
    print("Calculated validation2 loss %f"%(mean_squared_error(
        np.reshape(y_validation2, [len(y_validation2), look_ahead]),
        np.reshape(predictions_validation2_scaled, [len(predictions_validation2_scaled), look_ahead]))))

    np.save(data_folder + "validation2_predictions", predictions_validation2)
    np.save(data_folder + "validation2_true", y_true_validation2)
    np.save(data_folder + "validation2_labels", validation2_labels)

    predictions_test, y_true_test = get_predictions("Test", model, X_test, y_test, train_scaler, batch_size, look_ahead,
                                                    look_back, epochs, experiment_id,
                                                   )
    predictions_test_scaled = train_scaler.transform(predictions_test)
    print("Calculated test loss %f" % (mean_squared_error( np.reshape(y_test, [len(y_test),look_ahead]),
                                       np.reshape(predictions_test_scaled, [len(predictions_test_scaled),look_ahead]))))

    np.save(data_folder + "test_predictions", predictions_test)
    np.save(data_folder + "test_true", y_true_test)
    np.save(data_folder + "test_labels", test_labels)
    logging.info("-------------------------run complete----------------------------------------------")
Example #7
def run():
    #load config settings
    experiment_id = cfg.run_config['experiment_id']
    data_folder = cfg.run_config['data_folder']
    look_back = cfg.multi_step_lstm_config['look_back']
    look_ahead = cfg.multi_step_lstm_config['look_ahead']
    batch_size = cfg.multi_step_lstm_config['batch_size'] - (look_back + look_ahead) + 1
    epochs = cfg.multi_step_lstm_config['n_epochs']
    dropout = cfg.multi_step_lstm_config['dropout']
    layers = cfg.multi_step_lstm_config['layers']
    loss = cfg.multi_step_lstm_config['loss']
    # optimizer = cfg.multi_step_lstm_config['optimizer']
    shuffle = cfg.multi_step_lstm_config['shuffle']
    patience = cfg.multi_step_lstm_config['patience']
    validation = cfg.multi_step_lstm_config['validation']
    learning_rate = cfg.multi_step_lstm_config['learning_rate']
    logging.info("----------------------------------------------------")
    logging.info('Run id %s' % (experiment_id))

    logging.info(" HYPERPRAMRAMS : %s" % (str(locals())))

    train_scaler, X_train, y_train, X_validation1, y_validation1, X_validation2, y_validation2, validation2_labels, \
    X_test, y_test, test_labels = util.load_data(data_folder, look_back, look_ahead)

    # For a stateful LSTM the batch_size needs to be fixed beforehand.
    # We also need to ensure that all batches have the same number of samples,
    # so we drop the trailing samples that do not fill a complete batch.
    if batch_size > 1:
        n_train_batches = len(X_train) // batch_size
        len_train = n_train_batches * batch_size
        if len_train < len(X_train):
            X_train = X_train[:len_train]
            y_train = y_train[:len_train]

        n_validation1_batches = len(X_validation1) // batch_size
        len_validation1 = n_validation1_batches * batch_size
        if n_validation1_batches * batch_size < len(X_validation1):
            X_validation1 = X_validation1[:len_validation1]
            y_validation1 = y_validation1[:len_validation1]

        n_validation2_batches = len(X_validation2) // batch_size
        len_validation2 = n_validation2_batches * batch_size
        if n_validation2_batches * batch_size < len(X_validation2):
            X_validation2 = X_validation2[:len_validation2]
            y_validation2 = y_validation2[:len_validation2]

        n_test_batches = len(X_test) // batch_size
        len_test = n_test_batches * batch_size
        if n_test_batches * batch_size < len(X_test):
            X_test = X_test[:len_test]
            y_test = y_test[:len_test]

    stateful_lstm = lstm.StatefulMultiStepLSTM(batch_size=batch_size, look_back=look_back, look_ahead=look_ahead,
                                               layers=layers,
                                               dropout=dropout, loss=loss, learning_rate=learning_rate)
    model = stateful_lstm.build_model()
    if cfg.run_config['save_figure']:
        plot_model(model, to_file="imgs/%s_stateful_lstm.png" % experiment_id, show_shapes=True, show_layer_names=True)
    # train model on training set. validation1 set is used for early stopping
    history = lstm.train_stateful_model(model, X_train, y_train, batch_size, epochs, shuffle, validation, (X_validation1, y_validation1),
                     patience)
    fig = plt.figure()
    plt.plot(history.history['loss'])
    plt.plot(history.history['val_loss'])
    plt.ylabel('Loss')
    plt.xlabel('Epoch')
    plt.legend(['Train', 'Test'], loc='upper left')
    plt.show()
    if cfg.run_config['save_figure']:
        util.save_figure("%s/%s/"%("imgs",experiment_id), "train_errors" , fig)

    validation2_loss = model.evaluate(X_validation2, y_validation2, batch_size=batch_size, verbose=2)
    print "Validation2 Loss %s" % (validation2_loss)
    logging.info("Validation2 Loss %s" % (validation2_loss))
    test_loss = model.evaluate(X_test, y_test, batch_size=batch_size, verbose=2)
    print "Test Loss %s" % (test_loss)
    logging.info("Test Loss %s" % (test_loss))

    predictions_train, y_true_train = get_predictions("Train", model, X_train, y_train, train_scaler,
                                                               batch_size, look_ahead, look_back, epochs, experiment_id,
                                                               )
    np.save(data_folder + "train_predictions", predictions_train)
    np.save(data_folder + "train_true",y_true_train)


    predictions_validation1, y_true_validation1 = get_predictions("Validation1", model, X_validation1, y_validation1,
                                                                  train_scaler, batch_size, look_ahead, look_back,
                                                                  epochs, experiment_id,
                                                                  )
    predictions_validation1_scaled = train_scaler.transform(predictions_validation1)
    print "Calculated validation1 loss %f" % (mean_squared_error(
        np.reshape(y_validation1, [len(y_validation1), look_ahead]),
        np.reshape(predictions_validation1_scaled, [len(predictions_validation1_scaled), look_ahead])))

    np.save(data_folder + "validation1_predictions", predictions_validation1)
    np.save(data_folder + "validation1_true", y_true_validation1)



    predictions_validation2, y_true_validation2 = get_predictions("Validation2", model, X_validation2, y_validation2,
                                                                  train_scaler, batch_size, look_ahead, look_back,
                                                                  epochs, experiment_id,
                                                                 )
    predictions_validation2_scaled = train_scaler.transform(predictions_validation2)
    print "Calculated validation2 loss %f"%(mean_squared_error(
        np.reshape(y_validation2, [len(y_validation2), look_ahead]),
        np.reshape(predictions_validation2_scaled, [len(predictions_validation2_scaled), look_ahead])))

    np.save(data_folder + "validation2_predictions", predictions_validation2)
    np.save(data_folder + "validation2_true", y_true_validation2)
    np.save(data_folder + "validation2_labels", validation2_labels)

    predictions_test, y_true_test = get_predictions("Test", model, X_test, y_test, train_scaler, batch_size, look_ahead,
                                                    look_back, epochs, experiment_id,
                                                   )
    predictions_test_scaled = train_scaler.transform(predictions_test)
    print "Calculated test loss %f" % (mean_squared_error( np.reshape(y_test, [len(y_test),look_ahead]),
                                       np.reshape(predictions_test_scaled, [len(predictions_test_scaled),look_ahead])))

    np.save(data_folder + "test_predictions", predictions_test)
    np.save(data_folder + "test_true", y_true_test)
    np.save(data_folder + "test_labels", test_labels)
    logging.info("-------------------------run complete----------------------------------------------")
Example #8
def run():
    #load config settings
    experiment_id = cfg.run_config['experiment_id']
    data_folder = cfg.run_config['data_folder']
    look_back = cfg.multi_step_lstm_config['look_back']
    look_ahead = cfg.multi_step_lstm_config['look_ahead']
    batch_size = cfg.multi_step_lstm_config['batch_size'] - (look_back + look_ahead) + 1
    epochs = cfg.multi_step_lstm_config['n_epochs']
    dropout = cfg.multi_step_lstm_config['dropout']
    layers = cfg.multi_step_lstm_config['layers']
    loss = cfg.multi_step_lstm_config['loss']
    shuffle = cfg.multi_step_lstm_config['shuffle']
    patience = cfg.multi_step_lstm_config['patience']
    validation = cfg.multi_step_lstm_config['validation']
    learning_rate = cfg.multi_step_lstm_config['learning_rate']

    train_scaler, X_train, y_train, X_validation1, y_validation1, X_validation2, y_validation2, validation2_labels, \
    X_test, y_test, test_labels = util.load_data(data_folder, look_back, look_ahead)

    # For a stateful LSTM the batch_size needs to be fixed beforehand.
    # We also need to ensure that all batches have the same number of samples,
    # so we drop the trailing samples that do not fill a complete batch.
    if batch_size > 1:
        n_train_batches = int(len(X_train)/batch_size)
        len_train = n_train_batches * batch_size
        if len_train < len(X_train):
            X_train = X_train[:len_train]
            y_train = y_train[:len_train]

        n_validation1_batches = int(len(X_validation1)/batch_size)
        len_validation1 = n_validation1_batches * batch_size
        if n_validation1_batches * batch_size < len(X_validation1):
            X_validation1 = X_validation1[:len_validation1]
            y_validation1 = y_validation1[:len_validation1]

        n_validation2_batches = int(len(X_validation2) / batch_size)
        len_validation2 = n_validation2_batches * batch_size
        if n_validation2_batches * batch_size < len(X_validation2):
            X_validation2 = X_validation2[:len_validation2]
            y_validation2 = y_validation2[:len_validation2]

        n_test_batches = int(len(X_test)/batch_size)
        len_test = n_test_batches * batch_size
        if n_test_batches * batch_size < len(X_test):
            X_test = X_test[:len_test]
            y_test = y_test[:len_test]

    stateful_lstm = lstm.StatefulMultiStepLSTM(batch_size=batch_size, look_back=look_back, look_ahead=look_ahead,
                                               layers=layers,
                                               dropout=dropout, loss=loss, learning_rate=learning_rate)
    model = stateful_lstm.build_model()
    history = lstm.train_stateful_model(model, X_train, y_train, batch_size, epochs, shuffle, validation, (X_validation1, y_validation1),
                     patience)
    fig = plt.figure()
    plt.plot(history.history['loss'])
    plt.plot(history.history['val_loss'])
    plt.ylabel('Loss')
    plt.xlabel('Epoch')
    plt.legend(['Train', 'Test'], loc='upper left')
    plt.show()
    if cfg.run_config['save_figure']:
        util.save_figure("%s/%s/"%("imgs",experiment_id), "train_errors" , fig)

    validation2_loss = model.evaluate(X_validation2, y_validation2, batch_size=batch_size, verbose=2)
    test_loss = model.evaluate(X_test, y_test, batch_size=batch_size, verbose=2)
    predictions_train, y_true_train = get_predictions("Train", model, X_train, y_train, train_scaler,
                                                               batch_size, look_ahead, look_back, epochs, experiment_id,
                                                               )
    np.save(data_folder + "train_predictions", predictions_train)
    np.save(data_folder + "train_true",y_true_train)

    predictions_validation1, y_true_validation1 = get_predictions("Validation1", model, X_validation1, y_validation1,
                                                                  train_scaler, batch_size, look_ahead, look_back,
                                                                  epochs, experiment_id,
                                                                  )

    np.save(data_folder + "validation1_predictions", predictions_validation1)
    np.save(data_folder + "validation1_true", y_true_validation1)


    predictions_validation2, y_true_validation2 = get_predictions("Validation2", model, X_validation2, y_validation2,
                                                                  train_scaler, batch_size, look_ahead, look_back,
                                                                  epochs, experiment_id,
                                                                 )
    
    np.save(data_folder + "validation2_predictions", predictions_validation2)
    np.save(data_folder + "validation2_true", y_true_validation2)
    np.save(data_folder + "validation2_labels", validation2_labels)
    predictions_test, y_true_test = get_predictions("Test", model, X_test, y_test, train_scaler, batch_size, look_ahead,
                                                    look_back, epochs, experiment_id,
                                                   )

    np.save(data_folder + "test_predictions", predictions_test)
    np.save(data_folder + "test_true", y_true_test)
    np.save(data_folder + "test_labels", test_labels)
Example #9
def run():
    #load config settings
    experiment_id = cfg.run_config['experiment_id']
    data_folder = cfg.run_config['data_folder']
    look_back = cfg.multi_step_lstm_config['look_back']
    look_ahead = cfg.multi_step_lstm_config['look_ahead']
    batch_size = cfg.multi_step_lstm_config['batch_size']
    epochs = cfg.multi_step_lstm_config['n_epochs']
    dropout = cfg.multi_step_lstm_config['dropout']
    layers = cfg.multi_step_lstm_config['layers']
    loss = cfg.multi_step_lstm_config['loss']
    # optimizer = cfg.multi_step_lstm_config['optimizer']
    shuffle = cfg.multi_step_lstm_config['shuffle']
    patience = cfg.multi_step_lstm_config['patience']
    validation = cfg.multi_step_lstm_config['validation']
    learning_rate = cfg.multi_step_lstm_config['learning_rate']
    logging.info("----------------------------------------------------")
    logging.info('Run id %s' % (experiment_id))

    logging.info(" HYPERPRAMRAMS : %s" % (str(locals())))

    train_scaler, X_train, y_train, X_validation1, y_validation1, X_validation2, y_validation2, validation2_labels, \
    X_test, y_test, test_labels = util.load_data(data_folder, look_back, look_ahead)



    multistep_lstm = lstm.MultiStepLSTM(look_back=look_back, look_ahead=look_ahead,
                                        layers=layers,
                                        dropout=dropout, loss=loss, learning_rate=learning_rate)
    model = multistep_lstm.build_model()
    if cfg.run_config['save_figure']:
        plot_model(model, to_file="imgs/%s_lstm.png"%(experiment_id), show_shapes=True, show_layer_names=True)
    # train model on training set. validation1 set is used for early stopping

    fig = plt.figure()
    history = lstm.train_model(model, X_train, y_train, batch_size, epochs, shuffle, validation, (X_validation1, y_validation1), patience)
    plt.plot(history.history['loss'])
    plt.plot(history.history['val_loss'])
    plt.ylabel('Loss')
    plt.xlabel('Epoch')
    plt.legend(['Train', 'Test'], loc='upper left')
    plt.show()
    if cfg.run_config['save_figure']:
        util.save_figure("%s/%s/" % ("imgs", experiment_id), "train_errors", fig)

    validation2_loss = model.evaluate(X_validation2, y_validation2, batch_size=batch_size, verbose=2)
    print "Validation2 Loss %s" % (validation2_loss)
    logging.info("Validation2 Loss %s" % (validation2_loss))
    test_loss = model.evaluate(X_test, y_test, batch_size=batch_size, verbose=2)
    print "Test Loss %s" % (test_loss)
    logging.info("Test Loss %s" % (test_loss))

    predictions_train, y_true_train = get_predictions("Train", model, X_train, y_train, train_scaler,
                                                               batch_size, look_ahead, look_back, epochs, experiment_id,
                                                               )
    np.save(data_folder + "train_predictions", predictions_train)
    np.save(data_folder + "train_true",y_true_train)

    predictions_validation1, y_true_validation1 = get_predictions("Validation1", model, X_validation1, y_validation1,
                                                                  train_scaler, batch_size, look_ahead, look_back,
                                                                  epochs, experiment_id,
                                                                  )
    predictions_validation1_scaled = train_scaler.transform(predictions_validation1)
    print "Calculated validation1 loss %f" % (mean_squared_error(
        np.reshape(y_validation1, [len(y_validation1), look_ahead]),
        np.reshape(predictions_validation1_scaled, [len(predictions_validation1_scaled), look_ahead])))
    np.save(data_folder + "validation1_predictions", predictions_validation1)
    np.save(data_folder + "validation1_true", y_true_validation1)
    np.save(data_folder + "validation1_labels", validation2_labels)

    predictions_validation2, y_true_validation2 = get_predictions("Validation2", model, X_validation2, y_validation2,
                                                                  train_scaler, batch_size, look_ahead, look_back,
                                                                  epochs, experiment_id,
                                                                 )
    predictions_validation2_scaled = train_scaler.transform(predictions_validation2)
    print "Calculated validation2 loss %f"%(mean_squared_error(
        np.reshape(y_validation2, [len(y_validation2), look_ahead]),
        np.reshape(predictions_validation2_scaled, [len(predictions_validation2_scaled), look_ahead])))
    np.save(data_folder + "validation2_predictions", predictions_validation2)
    np.save(data_folder + "validation2_true", y_true_validation2)
    np.save(data_folder + "validation2_labels", validation2_labels)


    predictions_test, y_true_test = get_predictions("Test", model, X_test, y_test, train_scaler, batch_size, look_ahead,
                                                    look_back, epochs, experiment_id,
                                                   )
    predictions_test_scaled = train_scaler.transform(predictions_test)
    print "Calculated test loss %f" % (mean_squared_error( np.reshape(y_test, [len(y_test),look_ahead]),
                                       np.reshape(predictions_test_scaled, [len(predictions_test_scaled),look_ahead])))

    np.save(data_folder + "test_predictions", predictions_test)
    np.save(data_folder + "test_true", y_true_test)
    np.save(data_folder + "test_labels", test_labels)
    logging.info("-------------------------run complete----------------------------------------------")