Пример #1
0
    def __init__(self, model_shape=tuple(), num_time_steps=None):
        """Build the online and target DQN networks plus the replay buffers.

        Args:
            model_shape: pair of input shapes — (short-term shape, long-term
                shape); indexed positionally below.
            num_time_steps: number of time steps fed to the model —
                presumably consumed by _create_model; TODO confirm.
        """
        # Constants
        self.DISCOUNT = 0.99
        self.REPLAY_MEMORY_SIZE = 50_000  # How many last steps to keep for model training
        # int(): this threshold is compared against a buffer length, so keep it integral.
        self.MIN_REPLAY_MEMORY_SIZE = int(0.3 * self.REPLAY_MEMORY_SIZE)  # Minimum number of steps in a memory to start training
        self.MINIBATCH_SIZE = 16  # How many steps (samples) to use for training
        self.UPDATE_TARGET_EVERY = 10  # Terminal states (end of episodes)

        # Main model - gets trained every step
        self.st_shape = model_shape[0]
        self.lt_shape = model_shape[1]
        self.num_time_steps = num_time_steps
        self.model = self._create_model()

        # Save the initial model weights as one flat vector.
        # Fix: np.array(...).ravel() on a ragged list of weight tensors produced
        # a 1-D object array (and raises on NumPy >= 1.24); concatenating the
        # flattened tensors gives the intended flat float vector.
        self.initial_model_weights = np.concatenate(
            [w.ravel() for w in self.model.get_weights()])

        # Target model this is what we .predict against every step
        self.target_model = self._create_model()
        self.target_model.set_weights(self.model.get_weights())

        # Parameters
        self.replay_memory_allocation = 0
        self.replay_memory = deque(maxlen=self.REPLAY_MEMORY_SIZE)
        self.replay_priority = deque(maxlen=self.REPLAY_MEMORY_SIZE)
        self.tensorboard = ModifiedTensorBoard(log_dir=f"logs/log-{1}")
        self.target_update_counter = 0
        self.elapsed = 0
        self.conf_mat = np.array([])

        # MAE to calculate the error for prioritized replay priority
        # (reduction=NONE keeps one error per sample).
        self.mae = MeanAbsoluteError(reduction=tf.keras.losses.Reduction.NONE)
        self.action_errors = {0: list(), 1: list(), 2: list()}
Пример #2
0
    def lstm(self, path=None, name=None):
        """Fit a stateful single-layer LSTM regressor on the training split
        and score it on the test split.

        Returns:
            tuple: (rmse, mae, mape) losses on the test set.
        """
        # LSTM expects 3-D input: (samples, time steps, features).
        x_tr = np.reshape(self.X_train,
                          (self.X_train.shape[0], 1, self.X_train.shape[1]))
        x_te = np.reshape(self.X_test,
                          (self.X_test.shape[0], 1, self.X_test.shape[1]))

        net = Sequential()
        net.add(LSTM(32,
                     batch_input_shape=(1, x_tr.shape[1], x_tr.shape[2]),
                     stateful=True))
        net.add(Dense(1))
        net.compile(loss='mean_squared_error', optimizer='adam')

        # Stateful training: one epoch per pass, resetting the cell state
        # between passes so epochs do not leak state into each other.
        for _ in range(50):
            net.fit(x_tr,
                    self.y_train,
                    epochs=1,
                    batch_size=1,
                    verbose=self.VERBOSE,
                    shuffle=False)
            net.reset_states()

        y_hat = net.predict(x_te, batch_size=1).reshape(-1)

        loss_mse = MeanSquaredError()(self.y_test, y_hat).numpy()
        loss_rmse = np.sqrt(loss_mse)
        loss_mae = MeanAbsoluteError()(self.y_test, y_hat).numpy()
        loss_mape = MeanAbsolutePercentageError()(self.y_test, y_hat).numpy()

        return loss_rmse, loss_mae, loss_mape
Пример #3
0
    def dnn(self, path=None, name=None):
        """Fit a deep fully-connected regressor, plot and save the test
        predictions, and dump real vs. predicted values to Excel.

        Returns:
            tuple: (rmse, mae, mape) losses on the test set.
        """
        net = Sequential()
        print(len(self.X_train))
        net.add(Dense(self.INPUT_DIM, input_shape=(self.INPUT_DIM, )))
        net.add(Activation('relu'))
        # Seven hidden layers of the same width.
        for _ in range(7):
            net.add(Dense(self.N_HIDDEN))
            net.add(Activation('relu'))
        net.add(Dense(1))
        net.add(Activation('linear'))
        net.summary()
        net.compile(loss='mse',
                    optimizer=self.OPTIMIZER,
                    metrics=['accuracy'])
        net.fit(self.X_train,
                self.y_train,
                epochs=self.NB_EPOCH,
                verbose=self.VERBOSE)

        y_hat = net.predict(self.X_test).reshape(-1)

        # Plot real vs. predicted and save the figure under the given name.
        plt.plot(self.y_test)
        plt.plot(y_hat)
        plt.legend(['real', 'prediction'])
        plt.savefig(name + '.png')
        plt.show()

        # Persist both series for later inspection.
        real_df = pd.DataFrame(self.y_test)
        pred_df = pd.DataFrame(y_hat)
        real_df.to_excel(name + '-real.xlsx')
        pred_df.to_excel(name + '-prediction.xlsx')

        loss_mse = MeanSquaredError()(self.y_test, y_hat).numpy()
        loss_rmse = np.sqrt(loss_mse)
        loss_mae = MeanAbsoluteError()(self.y_test, y_hat).numpy()
        loss_mape = MeanAbsolutePercentageError()(self.y_test, y_hat).numpy()

        return loss_rmse, loss_mae, loss_mape
    def CNN_LSTM(self, name=None):
        """Train a Conv1D → LSTM regressor on 64-step windows, save a plot of
        the test predictions, and report test losses.

        Fix: the original instantiated an unused Sequential() model that was
        immediately overwritten by the functional Model — dead code, removed.

        Returns:
            tuple: (rmse, mse, mae, mape) on the test set.
        """
        # Work on raw ndarrays from here on.
        self.X_train = self.X_train.values
        self.X_test = self.X_test.values

        # (samples, 64 time steps, inferred channels)
        trainX = np.reshape(self.X_train, (self.X_train.shape[0], 64, -1))
        testX = np.reshape(self.X_test, (self.X_test.shape[0], 64, -1))

        with tf.device('/device:CPU:0'):
            input_layer = Input(shape=(64, 1))
            conv1 = Conv1D(filters=32,
                           kernel_size=8,
                           strides=1,
                           activation='relu',
                           padding='same')(input_layer)
            # NOTE(review): return_sequences=True makes the Dense head emit one
            # value per time step — confirm the targets are per-step, not
            # per-sample.
            lstm1 = LSTM(32, return_sequences=True)(conv1)
            output_layer = Dense(1, activation='linear')(lstm1)
            model = Model(inputs=input_layer, outputs=output_layer)
            model.compile(loss='mse', optimizer='adam')

            model.fit(trainX,
                      self.y_train,
                      epochs=2000,
                      batch_size=32,
                      verbose=self.VERBOSE)

            y_pred = model.predict(testX, batch_size=1)
            y_pred = y_pred.reshape(-1)

            print(self.y_test.shape)
            print(y_pred.shape)

            # Plot real vs. predicted and save the figure.
            plt.plot(self.y_test)
            plt.plot(y_pred)
            plt.legend(['real', 'prediction'])
            plt.savefig(f'./results/{name}.png')
            plt.clf()

            mse = MeanSquaredError()
            loss_mse = mse(self.y_test, y_pred).numpy()

            loss_rmse = np.sqrt(loss_mse)

            mae = MeanAbsoluteError()
            loss_mae = mae(self.y_test, y_pred).numpy()

            mape = MeanAbsolutePercentageError()
            loss_mape = mape(self.y_test, y_pred).numpy()

        return loss_rmse, loss_mse, loss_mae, loss_mape
    def CNN(self, name=None):
        """Train a 1-D convolutional regressor on 9-step windows, save a plot
        of the test predictions, and report test losses.

        Returns:
            tuple: (rmse, mse, mae, mape) on the test set.
        """
        # Work on raw ndarrays from here on.
        self.X_train = self.X_train.values
        self.X_test = self.X_test.values

        # (samples, 9 time steps, inferred channels)
        x_tr = np.reshape(self.X_train, (self.X_train.shape[0], 9, -1))
        x_te = np.reshape(self.X_test, (self.X_test.shape[0], 9, -1))

        with tf.device('/device:CPU:0'):
            net = Sequential()
            net.add(Conv1D(filters=64,
                           kernel_size=3,
                           activation='relu',
                           input_shape=(x_tr.shape[1], x_tr.shape[2])))
            net.add(Dropout(0.5))
            net.add(Flatten())
            net.add(Dense(100, activation='relu'))
            net.add(Dense(1, activation='linear'))
            net.compile(loss='mse', optimizer='adam')

            net.fit(x_tr,
                    self.y_train,
                    epochs=2000,
                    batch_size=32,
                    verbose=self.VERBOSE)

            y_hat = net.predict(x_te, batch_size=1).reshape(-1)

            print(self.y_test.shape)
            print(y_hat.shape)

            # Plot real vs. predicted and save the figure.
            plt.plot(self.y_test)
            plt.plot(y_hat)
            plt.legend(['real', 'prediction'])
            plt.savefig(f'./results/{name}.png')
            plt.clf()

            loss_mse = MeanSquaredError()(self.y_test, y_hat).numpy()
            loss_rmse = np.sqrt(loss_mse)
            loss_mae = MeanAbsoluteError()(self.y_test, y_hat).numpy()
            loss_mape = MeanAbsolutePercentageError()(self.y_test, y_hat).numpy()

        return loss_rmse, loss_mse, loss_mae, loss_mape
    def lstm(self, path=None, name=None):
        """Train a stateful 128-unit LSTM regressor for 2000 passes, save a
        plot of the test predictions, and report test losses.

        Returns:
            tuple: (rmse, mse, mae, mape) on the test set.
        """
        # Work on raw ndarrays from here on.
        self.X_train = self.X_train.values
        self.X_test = self.X_test.values
        # (samples, 1 time step, features)
        x_tr = np.reshape(self.X_train,
                          (self.X_train.shape[0], 1, self.X_train.shape[1]))
        x_te = np.reshape(self.X_test,
                          (self.X_test.shape[0], 1, self.X_test.shape[1]))

        net = Sequential()
        net.add(LSTM(128,
                     batch_input_shape=(1, x_tr.shape[1], x_tr.shape[2]),
                     stateful=True))
        net.add(Dense(1))
        net.compile(loss='mean_squared_error', optimizer='adam')
        net.summary()

        # Stateful training: one epoch per pass, resetting the cell state
        # between passes.
        for _ in range(2000):
            net.fit(x_tr,
                    self.y_train,
                    epochs=1,
                    batch_size=1,
                    verbose=self.VERBOSE,
                    shuffle=False)
            net.reset_states()

        y_hat = net.predict(x_te, batch_size=1).reshape(-1)

        # Plot real vs. predicted and save the figure.
        plt.plot(self.y_test)
        plt.plot(y_hat)
        plt.legend(['real', 'prediction'])
        plt.savefig(f'./results/{name}.png')
        plt.clf()

        loss_mse = MeanSquaredError()(self.y_test, y_hat).numpy()
        loss_rmse = np.sqrt(loss_mse)
        loss_mae = MeanAbsoluteError()(self.y_test, y_hat).numpy()
        loss_mape = MeanAbsolutePercentageError()(self.y_test, y_hat).numpy()

        return loss_rmse, loss_mse, loss_mae, loss_mape
    def lstm_with_sequence(self, path=None, name=None):
        """Train a plain LSTM on sequences of 3-feature steps, save a plot of
        the test predictions, and report test losses.

        Returns:
            tuple: (rmse, mse, mae, mape) on the test set.
        """
        # Work on raw ndarrays from here on.
        self.X_train = self.X_train.values
        self.X_test = self.X_test.values

        # (samples, inferred time steps, 3 features per step)
        x_tr = np.reshape(self.X_train, (self.X_train.shape[0], -1, 3))
        x_te = np.reshape(self.X_test, (self.X_test.shape[0], -1, 3))

        with tf.device('/device:CPU:0'):
            net = Sequential()
            net.add(LSTM(128,
                         activation='relu',
                         input_shape=(x_tr.shape[1], x_tr.shape[2])))
            net.add(Dense(1))
            net.compile(loss='mean_squared_error', optimizer='adam')

            net.fit(x_tr,
                    self.y_train,
                    epochs=300,
                    batch_size=10,
                    verbose=self.VERBOSE,
                    shuffle=False)

            y_hat = net.predict(x_te, batch_size=1).reshape(-1)

            print(self.y_test.shape)
            print(y_hat.shape)

            # Plot real vs. predicted and save the figure.
            plt.plot(self.y_test)
            plt.plot(y_hat)
            plt.legend(['real', 'prediction'])
            plt.savefig(f'./results/{name}.png')
            plt.clf()

            loss_mse = MeanSquaredError()(self.y_test, y_hat).numpy()
            loss_rmse = np.sqrt(loss_mse)
            loss_mae = MeanAbsoluteError()(self.y_test, y_hat).numpy()
            loss_mape = MeanAbsolutePercentageError()(self.y_test, y_hat).numpy()

        return loss_rmse, loss_mse, loss_mae, loss_mape
Пример #8
0
    def getErrors(self, predictions, test_data):
        """Score predictions against the test data with MSE / MAE / MAPE.

        Fix: Keras loss callables take (y_true, y_pred); the original passed
        the predictions first, which changes the result of the asymmetric
        MAPE (relative error is taken w.r.t. the first argument).

        Args:
            predictions: model outputs.
            test_data: ground-truth values of the same shape.

        Returns:
            tuple: (mse, mae, mape) as plain numpy scalars.
        """
        # NOTE(review): prepending self.batch_size to BOTH arrays adds an
        # artificial zero-error element that slightly dilutes every metric —
        # presumably a shape/padding workaround; confirm it is still needed.
        y_true = np.insert(test_data, 0, self.batch_size, axis=0)
        y_pred = np.insert(predictions, 0, self.batch_size, axis=0)

        mse_outcome = MeanSquaredError()(y_true, y_pred).numpy()
        mae_outcome = MeanAbsoluteError()(y_true, y_pred).numpy()
        mape_outcome = MeanAbsolutePercentageError()(y_true, y_pred).numpy()

        return (mse_outcome, mae_outcome, mape_outcome)
    def dnn(self, path=None, name=None):
        """Train a fully-connected regressor on the GPU, save a plot of the
        test predictions, and report test losses.

        Fix: the output layer used Activation('soft-max'), which is not a
        valid Keras activation identifier (raises "Unknown activation") — and
        a softmax over a single unit would output a constant 1.0 anyway.  The
        head is linear, matching the other regression heads in this file.

        Returns:
            tuple: (rmse, mse, mae, mape) on the test set.
        """
        with tf.device('/device:GPU:0'):
            model = Sequential()
            # Input layer sized to the feature count.
            model.add(Dense(self.X_train.shape[1], input_shape=(self.X_train.shape[1],)))
            model.add(Activation('relu'))
            # Three hidden layers of the same width.
            for i in range(3):
                model.add(Dense(self.N_HIDDEN))
                model.add(Activation('relu'))
            model.add(Dense(1))
            model.add(Activation('linear'))  # was 'soft-max' (invalid identifier)
            model.summary()
            model.compile(loss='mse',
                          optimizer=self.OPTIMIZER,
                          metrics=['accuracy'])
            model.fit(self.X_train, self.y_train,
                      epochs=self.NB_EPOCH,
                      verbose=self.VERBOSE)
            print(self.X_train)

            y_pred = model.predict(self.X_test)

            y_pred = y_pred.reshape(-1)

            # Plot real vs. predicted and save the figure.
            plt.plot(self.y_test)
            plt.plot(y_pred)
            plt.legend(['real', 'prediction'])
            plt.savefig(f'./results/{name}.png')
            plt.clf()

            mse = MeanSquaredError()
            loss_mse = mse(self.y_test, y_pred).numpy()

            loss_rmse = np.sqrt(loss_mse)

            mae = MeanAbsoluteError()
            loss_mae = mae(self.y_test, y_pred).numpy()

            mape = MeanAbsolutePercentageError()
            loss_mape = mape(self.y_test, y_pred).numpy()

        return loss_rmse, loss_mse, loss_mae, loss_mape
    def svr(self):
        """Fit a support-vector regressor and report test losses.

        Fix: removed the unused local import of StandardScaler — the pipeline
        contains only the SVR step, so inputs are presumably scaled by the
        caller (TODO confirm).

        Returns:
            tuple: (rmse, mae, mape) on the test set.
        """
        from sklearn.svm import SVR
        from sklearn.pipeline import make_pipeline

        regr = make_pipeline(SVR(C=1.0, epsilon=0.2))
        regr.fit(self.X_train, self.y_train)

        y_pred = regr.predict(self.X_test)

        y_pred = y_pred.reshape(-1)

        mse = MeanSquaredError()
        loss_mse = mse(self.y_test, y_pred).numpy()

        loss_rmse = np.sqrt(loss_mse)

        mae = MeanAbsoluteError()
        loss_mae = mae(self.y_test, y_pred).numpy()

        mape = MeanAbsolutePercentageError()
        loss_mape = mape(self.y_test, y_pred).numpy()

        return loss_rmse, loss_mae, loss_mape
Пример #11
0
def l1_b(b_true, b_pred, sample_weight):
    """Weighted mean absolute error between b_true and b_pred."""
    loss_fn = MeanAbsoluteError()
    return loss_fn(b_true, b_pred, sample_weight=sample_weight)
Пример #12
0
def l1_f(f_true, f_pred, sample_weight):
    """Weighted mean absolute error between f_true and f_pred."""
    loss_fn = MeanAbsoluteError()
    return loss_fn(f_true, f_pred, sample_weight=sample_weight)
Пример #13
0
def l1_a(a_true, a_pred, sample_weight):
    """Weighted mean absolute error between a_true and a_pred."""
    loss_fn = MeanAbsoluteError()
    return loss_fn(a_true, a_pred, sample_weight=sample_weight)
Пример #14
0
def lc_fb(a_true, c_true, f_pred, b_pred, sample_weight):
    """Blend f_pred and b_pred with weights a_true / (1 - a_true), then take
    the weighted L1 error of the blend against c_true."""
    blended = a_true * f_pred + (1. - a_true) * b_pred
    loss_fn = MeanAbsoluteError()
    return loss_fn(c_true, blended, sample_weight=sample_weight)
Пример #15
0
def lc_a(f_true, b_true, c_true, a_pred, sample_weight):
    """Blend f_true and b_true with the predicted weights a_pred /
    (1 - a_pred), then take the weighted L1 error against c_true."""
    blended = a_pred * f_true + (1. - a_pred) * b_true
    loss_fn = MeanAbsoluteError()
    return loss_fn(c_true, blended, sample_weight=sample_weight)
Пример #16
0
def _loss_56_2(y_true, y_pred, sample_weight=None):
    """Fixed-weight mix of losses: 0.5*BCE + 0.25*MAE + 0.25*MSE."""
    bce = BinaryCrossentropy()(y_true, y_pred, sample_weight)
    l1 = MeanAbsoluteError()(y_true, y_pred, sample_weight)
    l2 = MeanSquaredError()(y_true, y_pred, sample_weight)
    return .5 * bce + .25 * l1 + .25 * l2
Пример #17
0
def _loss_224(y_true, y_pred, sample_weight=None):
    """Combined loss: MAE + MSE + 5x Sobel-edge loss."""
    l1 = MeanAbsoluteError()(y_true, y_pred, sample_weight)
    l2 = MeanSquaredError()(y_true, y_pred, sample_weight)
    edge = SobelEdgeLoss()(y_true, y_pred, sample_weight)
    return l1 + l2 + 5. * edge
Пример #18
0
def main():
    """Month-by-month experiment driver: trains an MVO-optimised LSTM on one
    stack's gas series and writes the losses and predictions to Excel.

    Relies on project helpers visible elsewhere: prepare_data_window, Model,
    MVO, and the weight flatten/unflatten utilities.
    """
    # Experiment identifiers — only used to build the output file names.
    table = 'Table2'
    stack = 'Stack2'
    gas = 'SO2'
    NN = 'LSTM'
    OP = 'MVO'
    data = pd.read_excel(stack + '.xlsx')
    # data = data.set_index(data.iloc[:, 0])
    # data = data.iloc[:, 1:]
    # data = data.dropna()
    # data = data.iloc[1:]
    # data.to_excel('Stack2.xlsx')

    # Auxiliary series.  NOTE(review): presumably wind speed and temperature,
    # inferred from the column names — confirm against the sheet.
    W_S = data['W_S']
    T = data['T']
    data = data[gas]
    # 250 490 737 985 — row offsets where each month's slice ends.

    # Slice the gas series into monthly segments.
    may = data[:250]
    june = data[250:490]
    july = data[490:737]
    agust = data[737:985]
    september = data[985:]

    # Same monthly slicing for the wind-speed series.
    may_w_s = W_S[:250]
    june_w_s = W_S[250:490]
    july_w_s = W_S[490:737]
    agust_w_s = W_S[737:985]
    september_w_s = W_S[985:]

    # Same monthly slicing for the temperature series.
    may_t = T[:250]
    june_t = T[250:490]
    july_t = T[490:737]
    agust_t = T[737:985]
    september_t = T[985:]

    d = [may, june, july, agust, september]
    d_w_s = [may_w_s, june_w_s, july_w_s, agust_w_s, september_w_s]
    d_t = [may_t, june_t, july_t, agust_t, september_t]

    dd = ['may', 'june', 'july', 'agust', 'september']

    # Window size passed to prepare_data_window; TS is unused below.
    BS = 3
    TS = None

    p = dict()   # month name -> [rmse, mae, mape]
    pp = dict()  # output file name -> real / predicted series
    # for i in range(5):
    #     dnn = DNN(d[i], BS, TS)
    #     rmse, mae, mape = dnn.svr()
    #     p[dd[i]] = [rmse, mae, mape]
    # pp = pd.DataFrame(p)
    # pp.to_excel('2so.xlsx')
    # Fit one model per month.
    for i in range(5):
        X = d[i]

        # Build windowed features from the gas series plus the two
        # auxiliary series.
        X, y = prepare_data_window(X, BS, d_w_s[i], d_t[i])
        y = np.array(y)
        y = y.reshape(-1, 1)

        # Scale features and target to [0, 1] independently.
        scaler_X = MinMaxScaler()
        scaler_X = scaler_X.fit(X)
        X = scaler_X.transform(X)

        scaler_y = MinMaxScaler()
        scaler_y = scaler_y.fit(y)
        y = scaler_y.transform(y)

        y = y.reshape(-1, 1)

        # LSTM************************
        # Chronological split — shuffle=False preserves the time order.
        X_train, X_test, y_train, y_test = train_test_split(X,
                                                            y,
                                                            test_size=.33,
                                                            shuffle=False)
        # LSTM input shape: (samples, 1 time step, features).
        X_train = np.reshape(X_train, (X_train.shape[0], 1, X_train.shape[1]))
        X_test = np.reshape(X_test, (X_test.shape[0], 1, X_test.shape[1]))

        y_train = np.array(y_train)
        y_test = np.array(y_test)

        y_train = y_train.flatten()
        y_test = y_test.flatten()

        # Flatten the network weights into one vector so the metaheuristic
        # optimiser can treat training as a search over that vector.
        input_shape = X_train.shape
        model = Model().get_lstm(input_shape)
        Wb = get_weights_and_biases(model)
        Wb_flatten = flatten_weights_and_biases(Wb)
        dimensions = len(Wb_flatten)

        # # PSO
        # pso = PSO(model, dimensions, X_train, y_train, None,
        #                 init_weights=None, n_iteration=50)
        # cost, pos = pso.PSO()

        # MVO
        mvo = MVO(model, dimensions, X_train, y_train)
        cost, pos = mvo.MVO()

        # Rebuild a fresh model and load the optimiser's best weight vector.
        model = Model().get_lstm(input_shape)
        Wb_model = unflatten_weights_and_biases(Wb, pos)
        model = put_weights_and_biases(model, Wb_model)
        y_pred = model.predict(X_test)
        # # mse = MeanSquaredError()
        # # loss = mse(y_test, y_pred).numpy()
        #
        # # with open(f'./Results/{file_name}', 'w') as f:
        # #     f.write(str(loss))
        # # LSTM--------------------------------

        # # ENN+++++++++++++++++++++++++++++++++
        # X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.33, shuffle=False)
        # y_train = y_train.reshape(-1, 1)
        #
        # y_train = np.array(y_train)
        # y_test = np.array(y_test)
        #
        # # y_train = y_train.flatten()
        # # y_test = y_test.flatten()
        #
        # net = Model().get_elman(3)
        # w, s = get_elman_weights(net)
        #
        # dimensions = len(w)
        #
        # # error = net.train(X_train, y_train, epochs=500, show=100, goal=0.01)
        #
        # # PSO
        # pso_elman = PSO_ENN.PSO(net, dimensions, X_train, y_train, None,
        #           init_weights=None, n_iteration=50)
        # cost, pos = pso_elman.PSO()
        #
        # # # MVO
        # # mvo = MVO_ENN.MVO(net, dimensions, X_train, y_train)
        # # cost, pos = mvo.MVO()
        #
        # net = set_elman_weights(net, pos)
        #
        # y_pred = net.sim(X_test)
        #
        # # # model = Model().get_lstm(input_shape)
        # # # Wb_model = unflatten_weights_and_biases(Wb, pos)
        # # # model = put_weights_and_biases(model, Wb_model)
        # # # y_pred = model.predict(X_test)
        #
        # # ENN---------------------------------

        # Test-set losses for this month (on the scaled target).
        mse = MeanSquaredError()
        loss_mse = mse(y_test, y_pred).numpy()

        loss_rmse = np.sqrt(loss_mse)

        mae = MeanAbsoluteError()
        loss_mae = mae(y_test, y_pred).numpy()

        mape = MeanAbsolutePercentageError()
        loss_mape = mape(y_test, y_pred).numpy()

        p[dd[i]] = [loss_rmse, loss_mae, loss_mape]

        file_name_real = f'{stack}_{gas}_{dd[i]}_{NN}_{OP}_real.xlsx'
        file_name_pred = f'{stack}_{gas}_{dd[i]}_{NN}_{OP}_pred.xlsx'

        pp[file_name_real] = y_test
        pp[file_name_pred] = y_pred

        # plt.plot(y_pred)
        # plt.plot(y_test)
        # plt.show()

    p2 = pd.DataFrame(p)
    p2.to_excel('2so.xlsx')

    # Series can differ in length per month, so build from a dict of rows
    # and transpose into columns.
    pp2 = pd.DataFrame.from_dict(pp, orient='index')
    pp2 = pp2.transpose()
    pp2.to_excel(f'{table}_{stack}_{gas}_{NN}_{OP}.xlsx')