Esempio n. 1
0
    def lstm(self, path=None, name=None):
        """Train a stateful LSTM regressor and report test-set errors.

        The 2-D feature matrices are reshaped to (samples, 1, features)
        so every row becomes a single-step sequence.

        Parameters
        ----------
        path : str, optional
            Unused; kept for interface compatibility.
        name : str, optional
            Unused; kept for interface compatibility.

        Returns
        -------
        tuple of float
            (RMSE, MAE, MAPE) on the held-out test set.
        """
        seq_train = np.reshape(
            self.X_train, (self.X_train.shape[0], 1, self.X_train.shape[1]))
        seq_test = np.reshape(
            self.X_test, (self.X_test.shape[0], 1, self.X_test.shape[1]))

        net = Sequential()
        net.add(
            LSTM(32,
                 batch_input_shape=(1, seq_train.shape[1], seq_train.shape[2]),
                 stateful=True))
        net.add(Dense(1))
        net.compile(loss='mean_squared_error', optimizer='adam')

        # Stateful training: one epoch per pass, with a manual state reset
        # so consecutive epochs start from a clean hidden state.
        for _ in range(50):
            net.fit(seq_train,
                    self.y_train,
                    epochs=1,
                    batch_size=1,
                    verbose=self.VERBOSE,
                    shuffle=False)
            net.reset_states()

        predictions = net.predict(seq_test, batch_size=1).reshape(-1)

        loss_mse = MeanSquaredError()(self.y_test, predictions).numpy()
        loss_rmse = np.sqrt(loss_mse)
        loss_mae = MeanAbsoluteError()(self.y_test, predictions).numpy()
        loss_mape = MeanAbsolutePercentageError()(self.y_test,
                                                  predictions).numpy()

        return loss_rmse, loss_mae, loss_mape
Esempio n. 2
0
    def dnn(self, path=None, name=None):
        """Train a fully-connected regressor and report test-set errors.

        Builds an input layer sized to ``self.INPUT_DIM``, seven hidden
        ReLU layers of ``self.N_HIDDEN`` units and a single linear output,
        then plots and exports real-vs-predicted values.

        Parameters
        ----------
        path : str, optional
            Unused; kept for interface compatibility.
        name : str, optional
            Stem used for the saved plot and Excel files; falls back to
            ``'dnn'`` when omitted.

        Returns
        -------
        tuple of float
            (RMSE, MAE, MAPE) on the held-out test set.
        """
        # Fix: the original concatenated ``name + '.png'`` and raised a
        # TypeError whenever the default ``name=None`` was used.
        stem = name if name is not None else 'dnn'

        model = Sequential()
        print(len(self.X_train))
        model.add(Dense(self.INPUT_DIM, input_shape=(self.INPUT_DIM, )))
        model.add(Activation('relu'))
        for _ in range(7):
            model.add(Dense(self.N_HIDDEN))
            model.add(Activation('relu'))
        model.add(Dense(1))
        model.add(Activation('linear'))
        model.summary()
        # NOTE(review): 'accuracy' is not meaningful for a regression
        # loss; kept so the training logs stay comparable to older runs.
        model.compile(loss='mse',
                      optimizer=self.OPTIMIZER,
                      metrics=['accuracy'])
        model.fit(self.X_train,
                  self.y_train,
                  epochs=self.NB_EPOCH,
                  verbose=self.VERBOSE)

        y_pred = model.predict(self.X_test).reshape(-1)

        plt.plot(self.y_test)
        plt.plot(y_pred)
        plt.legend(['real', 'prediction'])
        plt.savefig(stem + '.png')
        plt.show()
        pd.DataFrame(self.y_test).to_excel(stem + '-real.xlsx')
        pd.DataFrame(y_pred).to_excel(stem + '-prediction.xlsx')

        loss_mse = MeanSquaredError()(self.y_test, y_pred).numpy()
        loss_rmse = np.sqrt(loss_mse)
        loss_mae = MeanAbsoluteError()(self.y_test, y_pred).numpy()
        loss_mape = MeanAbsolutePercentageError()(self.y_test, y_pred).numpy()

        return loss_rmse, loss_mae, loss_mape
Esempio n. 3
0
    def f(self, Wb_flatten):
        """Optimizer objective: load a flat weight vector into the Elman
        network and return its training-set MSE.

        Parameters
        ----------
        Wb_flatten : array-like
            Flattened weights and biases for the network.

        Returns
        -------
        float
            Mean squared error of the network on ``self.X_train``.
        """
        self.model = set_elman_weights(self.model, Wb_flatten)
        predictions = self.model.sim(self.X_train)
        return MeanSquaredError()(self.y_train, predictions).numpy()
Esempio n. 4
0
    def loss_function(self, Wb_flatten):
        """Optimizer objective: load ``Wb_flatten`` into the Keras model
        and return its training-set MSE.

        Parameters
        ----------
        Wb_flatten : array-like
            Flattened weights and biases to evaluate.

        Returns
        -------
        float
            Mean squared error of the model on ``self.X_train``.
        """
        shaped = unflatten_weights_and_biases(
            get_weights_and_biases(self.model), Wb_flatten)
        self.model = put_weights_and_biases(self.model, shaped)

        predictions = self.model.predict(self.X_train)
        return MeanSquaredError()(self.y_train, predictions).numpy()
    def CNN(self, name=None):
        """Train a 1-D CNN regressor on CPU and report test-set errors.

        Inputs are reshaped to (samples, 9, features/9) sequences before
        being fed to the convolutional stack.

        Parameters
        ----------
        name : str, optional
            Stem of the plot written to ``./results/{name}.png``.

        Returns
        -------
        tuple of float
            (RMSE, MSE, MAE, MAPE) on the held-out test set.
        """
        self.X_train = self.X_train.values
        self.X_test = self.X_test.values

        seq_train = np.reshape(self.X_train, (self.X_train.shape[0], 9, -1))
        seq_test = np.reshape(self.X_test, (self.X_test.shape[0], 9, -1))

        with tf.device('/device:CPU:0'):
            net = Sequential()
            net.add(
                Conv1D(filters=64,
                       kernel_size=3,
                       activation='relu',
                       input_shape=(seq_train.shape[1], seq_train.shape[2])))
            net.add(Dropout(0.5))
            net.add(Flatten())
            net.add(Dense(100, activation='relu'))
            net.add(Dense(1, activation='linear'))
            net.compile(loss='mse', optimizer='adam')

            net.fit(seq_train,
                    self.y_train,
                    epochs=2000,
                    batch_size=32,
                    verbose=self.VERBOSE)

            y_pred = net.predict(seq_test, batch_size=1).reshape(-1)

            print(self.y_test.shape)
            print(y_pred.shape)

            plt.plot(self.y_test)
            plt.plot(y_pred)
            plt.legend(['real', 'prediction'])
            plt.savefig(f'./results/{name}.png')
            plt.clf()

            loss_mse = MeanSquaredError()(self.y_test, y_pred).numpy()
            loss_rmse = np.sqrt(loss_mse)
            loss_mae = MeanAbsoluteError()(self.y_test, y_pred).numpy()
            loss_mape = MeanAbsolutePercentageError()(self.y_test,
                                                      y_pred).numpy()

        return loss_rmse, loss_mse, loss_mae, loss_mape
    def CNN_LSTM(self, name=None):
        """Train a Conv1D -> LSTM regressor on CPU and report errors.

        Inputs are reshaped to (samples, 64, features/64) sequences and
        fed through a functional-API model.

        Parameters
        ----------
        name : str, optional
            Stem of the plot written to ``./results/{name}.png``.

        Returns
        -------
        tuple of float
            (RMSE, MSE, MAE, MAPE) on the held-out test set.
        """
        self.X_train = self.X_train.values
        self.X_test = self.X_test.values

        trainX = np.reshape(self.X_train, (self.X_train.shape[0], 64, -1))
        testX = np.reshape(self.X_test, (self.X_test.shape[0], 64, -1))

        with tf.device('/device:CPU:0'):
            # Fix: the original created a throwaway ``Sequential()`` that
            # was immediately shadowed by the functional Model below.
            input_layer = Input(shape=(64, 1))
            conv1 = Conv1D(filters=32,
                           kernel_size=8,
                           strides=1,
                           activation='relu',
                           padding='same')(input_layer)
            lstm1 = LSTM(32, return_sequences=True)(conv1)
            output_layer = Dense(1, activation='linear')(lstm1)
            model = Model(inputs=input_layer, outputs=output_layer)
            model.compile(loss='mse', optimizer='adam')

            model.fit(trainX,
                      self.y_train,
                      epochs=2000,
                      batch_size=32,
                      verbose=self.VERBOSE)

            y_pred = model.predict(testX, batch_size=1)
            y_pred = y_pred.reshape(-1)

            print(self.y_test.shape)
            print(y_pred.shape)

            plt.plot(self.y_test)
            plt.plot(y_pred)
            plt.legend(['real', 'prediction'])
            plt.savefig(f'./results/{name}.png')
            plt.clf()

            mse = MeanSquaredError()
            loss_mse = mse(self.y_test, y_pred).numpy()

            loss_rmse = np.sqrt(loss_mse)

            mae = MeanAbsoluteError()
            loss_mae = mae(self.y_test, y_pred).numpy()

            mape = MeanAbsolutePercentageError()
            loss_mape = mape(self.y_test, y_pred).numpy()

        return loss_rmse, loss_mse, loss_mae, loss_mape
    def lstm(self, path=None, name=None):
        """Train a stateful LSTM (128 units) and report test-set errors.

        Feature frames are converted to numpy and reshaped to
        (samples, 1, features) single-step sequences.

        Parameters
        ----------
        path : str, optional
            Unused; kept for interface compatibility.
        name : str, optional
            Stem of the plot written to ``./results/{name}.png``.

        Returns
        -------
        tuple of float
            (RMSE, MSE, MAE, MAPE) on the held-out test set.
        """
        self.X_train = self.X_train.values
        self.X_test = self.X_test.values
        seq_train = np.reshape(
            self.X_train, (self.X_train.shape[0], 1, self.X_train.shape[1]))
        seq_test = np.reshape(
            self.X_test, (self.X_test.shape[0], 1, self.X_test.shape[1]))

        net = Sequential()
        net.add(
            LSTM(128,
                 batch_input_shape=(1, seq_train.shape[1], seq_train.shape[2]),
                 stateful=True))
        net.add(Dense(1))
        net.compile(loss='mean_squared_error', optimizer='adam')
        net.summary()

        # One epoch per pass with a manual state reset so consecutive
        # epochs start from a clean hidden state.
        for _ in range(2000):
            net.fit(seq_train,
                    self.y_train,
                    epochs=1,
                    batch_size=1,
                    verbose=self.VERBOSE,
                    shuffle=False)
            net.reset_states()

        y_pred = net.predict(seq_test, batch_size=1).reshape(-1)

        plt.plot(self.y_test)
        plt.plot(y_pred)
        plt.legend(['real', 'prediction'])
        plt.savefig(f'./results/{name}.png')
        plt.clf()

        loss_mse = MeanSquaredError()(self.y_test, y_pred).numpy()
        loss_rmse = np.sqrt(loss_mse)
        loss_mae = MeanAbsoluteError()(self.y_test, y_pred).numpy()
        loss_mape = MeanAbsolutePercentageError()(self.y_test, y_pred).numpy()

        return loss_rmse, loss_mse, loss_mae, loss_mape
    def lstm_with_sequence(self, path=None, name=None):
        """Train a stateless LSTM on length-N sequences of 3 features.

        Inputs are reshaped to (samples, features/3, 3) so each timestep
        carries three values.

        Parameters
        ----------
        path : str, optional
            Unused; kept for interface compatibility.
        name : str, optional
            Stem of the plot written to ``./results/{name}.png``.

        Returns
        -------
        tuple of float
            (RMSE, MSE, MAE, MAPE) on the held-out test set.
        """
        self.X_train = self.X_train.values
        self.X_test = self.X_test.values

        seq_train = np.reshape(self.X_train, (self.X_train.shape[0], -1, 3))
        seq_test = np.reshape(self.X_test, (self.X_test.shape[0], -1, 3))

        with tf.device('/device:CPU:0'):
            net = Sequential()
            net.add(
                LSTM(128,
                     activation='relu',
                     input_shape=(seq_train.shape[1], seq_train.shape[2])))
            net.add(Dense(1))
            net.compile(loss='mean_squared_error', optimizer='adam')

            net.fit(seq_train,
                    self.y_train,
                    epochs=300,
                    batch_size=10,
                    verbose=self.VERBOSE,
                    shuffle=False)

            y_pred = net.predict(seq_test, batch_size=1).reshape(-1)

            print(self.y_test.shape)
            print(y_pred.shape)

            plt.plot(self.y_test)
            plt.plot(y_pred)
            plt.legend(['real', 'prediction'])
            plt.savefig(f'./results/{name}.png')
            plt.clf()

            loss_mse = MeanSquaredError()(self.y_test, y_pred).numpy()
            loss_rmse = np.sqrt(loss_mse)
            loss_mae = MeanAbsoluteError()(self.y_test, y_pred).numpy()
            loss_mape = MeanAbsolutePercentageError()(self.y_test,
                                                      y_pred).numpy()

        return loss_rmse, loss_mse, loss_mae, loss_mape
Esempio n. 9
0
    def fitness(self):
        """Score every candidate weight vector and keep the best.

        Each population entry is ``[flat_weights, loss]``.  The loss slot
        is refreshed with the training-set MSE of the corresponding
        weights; the population is then sorted ascending by loss and
        truncated to ``self.n_population`` survivors.
        """
        for idx, candidate in enumerate(self.population):
            # Load this candidate's weights into the shared model.
            shaped = unflatten_weights_and_biases(
                get_weights_and_biases(self.model), candidate[0])
            self.model = put_weights_and_biases(self.model, shaped)

            predictions = self.model.predict(self.X_train)
            self.population[idx][1] = MeanSquaredError()(
                self.y_train, predictions).numpy()

        self.population.sort(key=lambda entry: entry[1])
        self.population = self.population[:self.n_population]
Esempio n. 10
0
    def setUp(self):
        """Build and briefly train a tiny binary-classifier fixture.

        The toy data satisfies y = 1 iff x[0] == 1, so the network only
        has to learn the first feature.
        """
        features = [[0, 10], [0, 0], [0, 1], [1, 0], [1, 2], [1, -1], [1, 3]]
        labels = [0, 0, 0, 1, 1, 1, 1]

        classifier = Sequential()
        classifier.add(Dense(32, activation='relu', input_dim=2))
        classifier.add(Dense(1, activation='sigmoid'))

        classifier.compile(optimizer='adam',
                           loss=MeanSquaredError(),
                           metrics=['accuracy'])
        classifier.fit(features, labels, epochs=10, verbose=0)

        self.model = classifier
Esempio n. 11
0
    def getErrors(self, predictions, test_data):
        """Compute MSE, MAE and MAPE between predictions and targets.

        Both arrays get ``self.batch_size`` prepended along axis 0 before
        scoring.  NOTE(review): confirm that prepending the batch size is
        intentional — it adds one artificial sample to every metric.

        Parameters
        ----------
        predictions : np.ndarray
            Model outputs.
        test_data : np.ndarray
            Ground-truth targets.

        Returns
        -------
        tuple of float
            (mse, mae, mape).
        """
        # Hoist the (identical) insertions out of the three metric calls.
        y_hat = np.insert(predictions, 0, self.batch_size, axis=0)
        y_true = np.insert(test_data, 0, self.batch_size, axis=0)

        # Fix: Keras losses take (y_true, y_pred); the original passed the
        # predictions first, which skews MAPE (it normalises by its first
        # argument).  MSE and MAE are symmetric and unaffected.
        mse_outcome = MeanSquaredError()(y_true, y_hat).numpy()
        mae_outcome = MeanAbsoluteError()(y_true, y_hat).numpy()
        mape_outcome = MeanAbsolutePercentageError()(y_true, y_hat).numpy()

        return (mse_outcome, mae_outcome, mape_outcome)
Esempio n. 12
0
def plot_model_results(data):
    '''
    Plot true vs. predicted labels in 3-D and report the model's MSE.

    Parameters
    ----------
    data : pd.DataFrame
        Model-fit results; must provide the columns 'x0', 'x1', 'y'
        and 'Predicted Label'.

    Returns
    -------
    float
        Mean squared error between 'y' and 'Predicted Label'.
    '''
    fig = plt.figure(figsize=(5, 5))
    fig.subplots_adjust(left=0.01,
                        right=0.985,
                        top=0.99,
                        bottom=0.01,
                        wspace=0)
    ax = plt.axes(projection="3d")
    # Original points in green, model decisions in blue.
    for idx, row in data.iterrows():
        true_label = row['y']
        decision = row['Predicted Label']
        x = row['x0']
        y = row['x1']
        ax.scatter3D(x, y, true_label, marker='.', color='g', alpha=0.1)
        ax.scatter3D(x, y, decision, marker='.', color='b', alpha=0.1)
    mse = MeanSquaredError()
    # NOTE(review): this value is an MSE, not a classification accuracy;
    # the variable name and log message are kept for compatibility.
    accuracy = mse(data['y'], data['Predicted Label']).numpy()
    print('Model accuracy was %.3f' % (accuracy))
    legend_elements = [
        Patch(facecolor='g', edgecolor='g', label='Original'),
        Patch(facecolor='b', edgecolor='b', label='Predicted')
    ]
    ax.legend(handles=legend_elements, title='Value', loc='upper right')
    ax.set_xlabel('x0')
    ax.set_ylabel('x1')
    ax.set_zlabel('y')
    plt.savefig('./q1_classified_data.jpg')
    plt.clf()
    return accuracy
    def dnn(self, path=None, name=None):
        """Train a fully-connected regressor on GPU and report errors.

        Builds an input layer sized to the training feature count, three
        hidden ReLU layers of ``self.N_HIDDEN`` units and a single linear
        output, then plots real vs. predicted values.

        Parameters
        ----------
        path : str, optional
            Unused; kept for interface compatibility.
        name : str, optional
            Stem of the plot written to ``./results/{name}.png``.

        Returns
        -------
        tuple of float
            (RMSE, MSE, MAE, MAPE) on the held-out test set.
        """
        with tf.device('/device:GPU:0'):
            model = Sequential()
            model.add(Dense(self.X_train.shape[1],
                            input_shape=(self.X_train.shape[1],)))
            model.add(Activation('relu'))
            for i in range(3):
                model.add(Dense(self.N_HIDDEN))
                model.add(Activation('relu'))
            model.add(Dense(1))
            # Fix: 'soft-max' is not a valid Keras activation name and
            # raises at model-build time; a single-unit regressor needs a
            # linear output, matching the sibling dnn() implementation.
            model.add(Activation('linear'))
            model.summary()
            # NOTE(review): 'accuracy' is not meaningful for regression;
            # kept so training logs stay comparable to older runs.
            model.compile(loss='mse',
                          optimizer=self.OPTIMIZER,
                          metrics=['accuracy'])
            model.fit(self.X_train, self.y_train,
                      epochs=self.NB_EPOCH,
                      verbose=self.VERBOSE)

            y_pred = model.predict(self.X_test).reshape(-1)

            plt.plot(self.y_test)
            plt.plot(y_pred)
            plt.legend(['real', 'prediction'])
            plt.savefig(f'./results/{name}.png')
            plt.clf()

            loss_mse = MeanSquaredError()(self.y_test, y_pred).numpy()
            loss_rmse = np.sqrt(loss_mse)
            loss_mae = MeanAbsoluteError()(self.y_test, y_pred).numpy()
            loss_mape = MeanAbsolutePercentageError()(self.y_test,
                                                      y_pred).numpy()

        return loss_rmse, loss_mse, loss_mae, loss_mape
    def svr(self):
        """Fit a support-vector regressor and report test-set errors.

        Returns
        -------
        tuple of float
            (RMSE, MAE, MAPE) on the held-out test set.
        """
        from sklearn.svm import SVR
        from sklearn.pipeline import make_pipeline
        from sklearn.preprocessing import StandardScaler

        pipeline = make_pipeline(SVR(C=1.0, epsilon=0.2))
        pipeline.fit(self.X_train, self.y_train)

        predictions = pipeline.predict(self.X_test).reshape(-1)

        loss_mse = MeanSquaredError()(self.y_test, predictions).numpy()
        loss_rmse = np.sqrt(loss_mse)
        loss_mae = MeanAbsoluteError()(self.y_test, predictions).numpy()
        loss_mape = MeanAbsolutePercentageError()(self.y_test,
                                                  predictions).numpy()

        return loss_rmse, loss_mae, loss_mape
Esempio n. 15
0
def _loss_56_2(y_true, y_pred, sample_weight=None):
    """Blended loss: 50% binary cross-entropy + 25% MAE + 25% MSE."""
    bce = BinaryCrossentropy()(y_true, y_pred, sample_weight)
    mae = MeanAbsoluteError()(y_true, y_pred, sample_weight)
    mse = MeanSquaredError()(y_true, y_pred, sample_weight)
    return .5 * bce + .25 * mae + .25 * mse
Esempio n. 16
0
def _loss_224(y_true, y_pred, sample_weight=None):
    """Composite loss: MAE + MSE + 5x Sobel edge loss.

    ``sample_weight`` is forwarded to each component loss.
    """
    # Fix: removed two plt.* statements that sat after the return and
    # could never execute.
    return MeanAbsoluteError()(y_true, y_pred, sample_weight) + \
           MeanSquaredError()(y_true, y_pred, sample_weight) + \
           5. * SobelEdgeLoss()(y_true, y_pred, sample_weight)
    
def plotCorrelation():
    """Scatter predicted vs. actual DHI and overlay the identity line.

    NOTE(review): reads the module-level globals ``y_pred`` and
    ``y_test`` — confirm both are populated before calling.
    """
    # plotting the correlation between actual and predicted DHI
    plt.figure()
    plt.scatter(y_pred, y_test, marker = '.', color = 'g', linewidths = 0.01, label = 'Actual value')
    # Plotting y_pred against itself draws the y = x reference line.
    plt.plot(y_pred, y_pred, color = 'r', label = 'Fitted line')
    plt.xlabel("Predicted radiation $(W/m^2)$")
    plt.ylabel("Actual radiation $(W/m^2)$")
    plt.grid(True)
    plt.legend()
    plt.show()

# Train the model under three configurations and compare their MAE.
mae = []
for i in range(1, 4):
    # NOTE(review): `i` appears to select the input/output configuration
    # built by initializeInputOutput — confirm against its definition.
    X_train, X_test, y_train, y_test = initializeInputOutput(df, 15, i)
    model = myModel(X_train.shape[1:])

    model.compile(loss=MeanSquaredError(), optimizer='RMSprop',
                  metrics=[MeanAbsoluteError()])

    history = model.fit(X_train, y_train, epochs=50, batch_size=512, verbose=0,
                        validation_data=(X_test, y_test))

    # evaluate() returns [loss, metric]; keep only the MAE metric.
    mae.append(model.evaluate(X_test, y_test, verbose = 0)[1])
    print(str(i) + ".", "MAE:", mae[i-1])

# Predict with the last trained model and visualise the correlation.
y_pred = model.predict(X_test)
plotCorrelation()
Esempio n. 18
0
def main():
    """Run the LSTM + MVO forecasting pipeline for one stack/gas pair.

    Reads the stack's Excel sheet, splits the gas series into monthly
    segments, windows and scales each segment, trains an LSTM whose
    weights are chosen by the MVO optimiser, and writes per-month error
    metrics (RMSE/MAE/MAPE) plus real-vs-predicted series to Excel.
    """
    table = 'Table2'
    stack = 'Stack2'
    gas = 'SO2'
    NN = 'LSTM'  # network label used in output file names
    OP = 'MVO'  # optimiser label used in output file names
    data = pd.read_excel(stack + '.xlsx')
    # data = data.set_index(data.iloc[:, 0])
    # data = data.iloc[:, 1:]
    # data = data.dropna()
    # data = data.iloc[1:]
    # data.to_excel('Stack2.xlsx')

    # Exogenous series: wind speed and temperature.
    W_S = data['W_S']
    T = data['T']
    data = data[gas]
    # 250 490 737 985

    # Monthly slices of the target series (row boundaries listed above).
    may = data[:250]
    june = data[250:490]
    july = data[490:737]
    agust = data[737:985]
    september = data[985:]

    # Matching monthly slices of the wind-speed series.
    may_w_s = W_S[:250]
    june_w_s = W_S[250:490]
    july_w_s = W_S[490:737]
    agust_w_s = W_S[737:985]
    september_w_s = W_S[985:]

    # Matching monthly slices of the temperature series.
    may_t = T[:250]
    june_t = T[250:490]
    july_t = T[490:737]
    agust_t = T[737:985]
    september_t = T[985:]

    d = [may, june, july, agust, september]
    d_w_s = [may_w_s, june_w_s, july_w_s, agust_w_s, september_w_s]
    d_t = [may_t, june_t, july_t, agust_t, september_t]

    dd = ['may', 'june', 'july', 'agust', 'september']

    # BS: sliding-window size; TS: unused here.
    BS = 3
    TS = None

    p = dict()  # per-month error metrics
    pp = dict()  # per-month real/predicted series
    # for i in range(5):
    #     dnn = DNN(d[i], BS, TS)
    #     rmse, mae, mape = dnn.svr()
    #     p[dd[i]] = [rmse, mae, mape]
    # pp = pd.DataFrame(p)
    # pp.to_excel('2so.xlsx')
    for i in range(5):
        X = d[i]

        # Build windowed samples augmented with wind speed and temperature.
        X, y = prepare_data_window(X, BS, d_w_s[i], d_t[i])
        y = np.array(y)
        y = y.reshape(-1, 1)

        # Scale inputs and target independently to [0, 1].
        scaler_X = MinMaxScaler()
        scaler_X = scaler_X.fit(X)
        X = scaler_X.transform(X)

        scaler_y = MinMaxScaler()
        scaler_y = scaler_y.fit(y)
        y = scaler_y.transform(y)

        y = y.reshape(-1, 1)

        # LSTM************************
        # Chronological split (shuffle=False preserves time order).
        X_train, X_test, y_train, y_test = train_test_split(X,
                                                            y,
                                                            test_size=.33,
                                                            shuffle=False)
        X_train = np.reshape(X_train, (X_train.shape[0], 1, X_train.shape[1]))
        X_test = np.reshape(X_test, (X_test.shape[0], 1, X_test.shape[1]))

        y_train = np.array(y_train)
        y_test = np.array(y_test)

        y_train = y_train.flatten()
        y_test = y_test.flatten()

        # NOTE(review): Model() here is the project's model factory, not
        # keras.Model — confirm against its definition.
        input_shape = X_train.shape
        model = Model().get_lstm(input_shape)
        Wb = get_weights_and_biases(model)
        Wb_flatten = flatten_weights_and_biases(Wb)
        dimensions = len(Wb_flatten)

        # # PSO
        # pso = PSO(model, dimensions, X_train, y_train, None,
        #                 init_weights=None, n_iteration=50)
        # cost, pos = pso.PSO()

        # MVO
        mvo = MVO(model, dimensions, X_train, y_train)
        cost, pos = mvo.MVO()

        # Rebuild the network and load the optimised weight vector.
        model = Model().get_lstm(input_shape)
        Wb_model = unflatten_weights_and_biases(Wb, pos)
        model = put_weights_and_biases(model, Wb_model)
        y_pred = model.predict(X_test)
        # # mse = MeanSquaredError()
        # # loss = mse(y_test, y_pred).numpy()
        #
        # # with open(f'./Results/{file_name}', 'w') as f:
        # #     f.write(str(loss))
        # # LSTM--------------------------------

        # # ENN+++++++++++++++++++++++++++++++++
        # X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.33, shuffle=False)
        # y_train = y_train.reshape(-1, 1)
        #
        # y_train = np.array(y_train)
        # y_test = np.array(y_test)
        #
        # # y_train = y_train.flatten()
        # # y_test = y_test.flatten()
        #
        # net = Model().get_elman(3)
        # w, s = get_elman_weights(net)
        #
        # dimensions = len(w)
        #
        # # error = net.train(X_train, y_train, epochs=500, show=100, goal=0.01)
        #
        # # PSO
        # pso_elman = PSO_ENN.PSO(net, dimensions, X_train, y_train, None,
        #           init_weights=None, n_iteration=50)
        # cost, pos = pso_elman.PSO()
        #
        # # # MVO
        # # mvo = MVO_ENN.MVO(net, dimensions, X_train, y_train)
        # # cost, pos = mvo.MVO()
        #
        # net = set_elman_weights(net, pos)
        #
        # y_pred = net.sim(X_test)
        #
        # # # model = Model().get_lstm(input_shape)
        # # # Wb_model = unflatten_weights_and_biases(Wb, pos)
        # # # model = put_weights_and_biases(model, Wb_model)
        # # # y_pred = model.predict(X_test)
        #
        # # ENN---------------------------------

        # Score the optimised model on the held-out slice.
        mse = MeanSquaredError()
        loss_mse = mse(y_test, y_pred).numpy()

        loss_rmse = np.sqrt(loss_mse)

        mae = MeanAbsoluteError()
        loss_mae = mae(y_test, y_pred).numpy()

        mape = MeanAbsolutePercentageError()
        loss_mape = mape(y_test, y_pred).numpy()

        p[dd[i]] = [loss_rmse, loss_mae, loss_mape]

        file_name_real = f'{stack}_{gas}_{dd[i]}_{NN}_{OP}_real.xlsx'
        file_name_pred = f'{stack}_{gas}_{dd[i]}_{NN}_{OP}_pred.xlsx'

        pp[file_name_real] = y_test
        pp[file_name_pred] = y_pred

        # plt.plot(y_pred)
        # plt.plot(y_test)
        # plt.show()

    # Persist the per-month metrics and series.
    p2 = pd.DataFrame(p)
    p2.to_excel('2so.xlsx')

    pp2 = pd.DataFrame.from_dict(pp, orient='index')
    pp2 = pp2.transpose()
    pp2.to_excel(f'{table}_{stack}_{gas}_{NN}_{OP}.xlsx')
Esempio n. 19
0
model.add(Activation('relu'))
model.add(Dense(11))

try:
    model.load_weights("model.h5")
except:
    print('no weights file found')

folder_name = datetime.datetime.now().strftime(
    "%Y%m%d-%H%M%S")  #creates unique folder name each time file is run

log_directory = os.path.join('logs', folder_name)  #creates this folder in logs

writer = tf.summary.create_file_writer(logdir=log_directory)

model.compile(loss=MeanSquaredError(), optimizer=Adam(learning_rate=0.0001))

epsilon = 0.1
gamma = 0.99
state = env.no_op()
replay_memory = []

for current_frame in range(0, 1000000):
    loss = 0
    if random.random() <= epsilon or current_frame < 3200:
        action = env.action_space.sample()  #random action is taken
    else:
        #highest value is the action taken
        q = model.predict(state)  #tensor (1,11)
        m = 0
        for x in range(0, 11):
Esempio n. 20
0
# Load the experiment configuration from the YAML file given on the CLI.
with open(options.config, 'r') as file:
    config = yaml.safe_load(file)

features = config['extracted_features']
col_id_name = config['col_id_name']
col_target_name = config['col_target_name']
dropped_ids = config['dropped_ids']

# NOTE(review): these hard-coded values immediately override the config
# entries read above — confirm whether the config should win instead.
col_id_name = 'Id'
col_target_name = 'SalePrice'
dropped_ids = [524, 1299]

Xs = load_x(features, dropped_ids)
X_train_all = Xs['train']
X_test = Xs['test']
print_exit(X_test.isnull().sum())
y_train_all = load_y(col_id_name, col_target_name, dropped_ids)

# Small dense network; the expm1 below implies the target was
# log1p-transformed upstream — TODO confirm in load_y.
model = Sequential()
model.add(Dense(units=64, activation='relu', input_dim=6))
model.add(Dense(units=10, activation='softmax'))
model.compile(loss=MeanSquaredError(),
              optimizer='sgd',
              metrics=[MeanSquaredError()])
model.fit(X_train_all.values, y_train_all.values, epochs=5, batch_size=32)
y_pred_logarithmic = model.predict(X_test.values)
pred = np.expm1(y_pred_logarithmic)
# NOTE(review): r2_score(X_test, pred) compares features with predictions,
# not targets — this looks wrong; no test-set y is loaded here. Verify.
score = r2_score(X_test, pred)
print(score)
Esempio n. 21
0
# Report split sizes before building the datasets.
print('xtrain:', len(x_train))
print('ytrain:', len(y_train))
print('x_val:', len(x_val))
print('y_val:', len(y_val))

# Wrap the raw arrays in the project's dataset generator.
train_set = XY_dataset(x_train,y_train,"train",batch_size)
val_set = XY_dataset(x_val,y_val,"validation")

# Free up RAM in case the model definition cells were run multiple times
#keras.backend.clear_session()

# Build model
model = get_model((image_side,image_side), num_classes)
#model.summary()

# NOTE(review): MSE loss combined with an 'accuracy' metric is unusual —
# confirm whether the task is regression or classification.
model.compile(loss=MeanSquaredError(), optimizer=Adam(), metrics=['accuracy'])

model.fit(train_set,
          batch_size=batch_size, # Only change batch size at top of file!
          epochs=1,
          verbose=True,
          validation_data=val_set,
          steps_per_epoch=math.floor(len(train_set)/batch_size), # Don't change steps_per_epoch!
          validation_steps=len(val_set)) # Don't change validation_steps!

# Rebuild the validation split in "test" mode for the final evaluation.
val_set = XY_dataset(x_val,y_val,"test")

score = model.evaluate(val_set, verbose=True)
print(f"\n\nCheck if the bar above says {len(val_set)}/{len(val_set)}. If not: call Aart!")

print('Test loss:', score[0])