Example #1
0
    def _create_models(self, X, y):
        if len(y.shape) == 1:
            n_outputs = 1
        else:
            raise ValueError(
                'Only scalar predictions are currently supported.')

        self.models_ = []

        if self.backend_ == 'sklearn':
            from sklearn.neural_network import MLPRegressor

            for model_idx in range(self.n_regressors_):
                model = MLPRegressor(
                    hidden_layer_sizes=self.layer_sizes_list_[model_idx],
                    activation=self.activations_[model_idx],
                    solver=self.solvers_[model_idx],
                    alpha=self.alphas_[model_idx],
                    batch_size=self.batch_sizes_[model_idx],
                    max_iter=self.max_iters_[model_idx],
                    momentum=self.momentums_[model_idx],
                    nesterovs_momentum=self.nesterovs_momentums_[model_idx],
                    verbose=self.verbose_,
                )
                self.models_.append(model)

        elif self.backend_ == 'keras':
            from keras import regularizers
            from keras.layers import Dense
            from keras.models import Sequential

            for model_idx in range(self.n_regressors_):
                hidden_layer_sizes = self.layer_sizes_list_[model_idx]

                model = Sequential()
                for layer_size in hidden_layer_sizes:
                    model.add(
                        Dense(layer_size,
                              kernel_initializer='normal',
                              activation=self.activations_[model_idx],
                              kernel_regularizer=regularizers.l2(0.01)))

                if self.loss_ == 'mse':
                    model.add(
                        Dense(1,
                              kernel_initializer='normal',
                              kernel_regularizer=regularizers.l2(0.01)))
                    model.compile(loss='mean_squared_error',
                                  optimizer=self.solvers_[model_idx])

                elif self.loss_ == 'gaussian_nll':
                    model.add(
                        Dense(2,
                              kernel_initializer='normal',
                              kernel_regularizer=regularizers.l2(0.01)))
                    model.compile(loss=gaussian_nll,
                                  optimizer=self.solvers_[model_idx])

                self.models_.append(model)
    def RunIterativePredict(FeaturesDFin, currentStep):
        """Fit the configured regressor (NN / GPR / LSTM per ``Settings``) and
        predict the target one step ahead or iteratively over
        ``Settings["predictAhead"]`` steps.

        :param FeaturesDFin: DataFrame
            Feature matrix; must contain the module-level ``targetVarName``
            column.
        :param currentStep: int
            Position in the module-level ``infectedDF`` marking where the
            expanding window of known history ends.
        :return: list
            ``[iterPredsList, iterPreds_Std_List]``; each list is prefixed
            with the last index of the input frame, and predictions are
            de-normalised when ``Settings["Normalise"] == "Yes"``.
        """
        FeaturesDF = FeaturesDFin.copy()
        FeaturesDF_raw = FeaturesDF.copy()

        # Z-score each column against its own raw mean/std.
        if Settings["Normalise"] == "Yes":
            for col in FeaturesDF.columns:
                FeaturesDF[col] = (
                    FeaturesDF[col] -
                    FeaturesDF_raw[col].mean()) / FeaturesDF_raw[col].std()

        # Build the regressor selected in Settings.
        if Settings["Regressor"] == "NN":
            nn_options = {  # options for neural network
                'hidden_layer_sizes': (2, 1),
                'solver': 'lbfgs',
                'activation': 'tanh',
                'max_iter': 1500,  # default 200
                'alpha': 0.001,  # default 0.0001
                'random_state': None  # default None
            }
            model = MLPRegressor(**nn_options)
        elif Settings["Regressor"] == "GPR":
            mainRolling_kernel = ConstantKernel() + Matern() + DotProduct(
            ) + WhiteKernel()
            model = GaussianProcessRegressor(
                kernel=mainRolling_kernel,
                random_state=0,
                n_restarts_optimizer=2)
        elif Settings["Regressor"] == "LSTM":
            # input_shape = (timesteps=1, n_features)
            model = Sequential()
            model.add(LSTM(7, input_shape=(1, FeaturesDF.shape[1])))
            model.add(Dense(1))
            model.compile(loss='mean_squared_error', optimizer='adam')

        iterPredsList = []
        iterPreds_Std_List = []

        if "SingleStepPredict" in Settings["Reporter"]:
            # Lag the features by one step; targets are the current values.
            fitInputX = FeaturesDF.shift(1).bfill().values
            fitTargetY = FeaturesDF[targetVarName].values.reshape(-1, 1)

            if Settings["Regressor"] == "LSTM":
                # BUG FIX: the original called
                # ``fitInputX.reshape(trainX, (1, ...))`` where ``trainX`` is
                # undefined (NameError) and the reshape signature is invalid.
                # The LSTM expects (samples, timesteps, features).
                fitInputX = fitInputX.reshape(-1, 1, FeaturesDF.shape[1])
                for i in range(10):
                    model.fit(fitInputX,
                              fitTargetY,
                              epochs=1,
                              batch_size=5,
                              verbose=0,
                              shuffle=False)
                    model.reset_states()
            else:
                model.fit(fitInputX, fitTargetY)

            if Settings['Regressor'] in ["NN", "LSTM"]:
                # NOTE(review): for LSTM this passes a 2-D (1, n) array to
                # predict; a 3-D (1, 1, n) input may be required — confirm.
                firstPred = model.predict(FeaturesDF.iloc[-1].values.reshape(
                    1, -1))[0]
                firstPred_Std = 0
            elif Settings['Regressor'] == "GPR":
                firstPred, firstPred_Std = model.predict(
                    FeaturesDF.iloc[-1].values.reshape(1, -1), return_std=True)
                firstPred = firstPred[0][0]
                firstPred_Std = firstPred_Std[0]
            iterPredsList.append(firstPred)
            iterPreds_Std_List.append(firstPred_Std)

        elif "Iterative" in Settings["Reporter"]:
            # Iterative predictions: feed each prediction back as an input.
            inputDataList_rep = []
            for j in range(Settings["predictAhead"] - 1):
                if j == 0:
                    # First pass: fit the model on the lagged history.
                    fitInputX = FeaturesDF.shift(1).bfill().values
                    fitTargetY = FeaturesDF[targetVarName].values.reshape(
                        -1, 1)

                    if Settings["Regressor"] == "LSTM":
                        # BUG FIX: reshape to (samples, timesteps, features);
                        # the original 2-D (1, n_features) reshape fails for
                        # more than one training sample and has the wrong
                        # rank for the LSTM above.
                        fitInputX = np.reshape(fitInputX,
                                               (-1, 1, FeaturesDF.shape[1]))
                        for i in range(10):
                            model.fit(fitInputX,
                                      fitTargetY,
                                      epochs=1,
                                      batch_size=1,
                                      verbose=0,
                                      shuffle=False)
                            model.reset_states()
                    else:
                        model.fit(fitInputX, fitTargetY)

                    if Settings['Regressor'] in ["NN", "LSTM"]:
                        firstPred = model.predict(
                            FeaturesDF.iloc[-1].values.reshape(1, -1))[0]
                        firstPred_Std = 0
                    elif Settings['Regressor'] == "GPR":
                        firstPred, firstPred_Std = model.predict(
                            FeaturesDF.iloc[-1].values.reshape(1, -1),
                            return_std=True)
                        firstPred = firstPred[0][0]
                        firstPred_Std = firstPred_Std[0]
                    iterPredsList.append(firstPred)
                    iterPreds_Std_List.append(firstPred_Std)

                # Expand the known-history window by one step and graft the
                # (de-normalised) predictions made so far onto its tail.
                expanding_infectedDF = infectedDF.copy().iloc[:currentStep +
                                                              j + 1]
                newDate = expanding_infectedDF.index[-1]
                knownWeather = allWeatherDF.loc[newDate].values
                knownMobility = mobility_df.loc[newDate]
                if Settings["Normalise"] == "Yes":
                    invertNormIterPreds = [
                        x * FeaturesDF_raw[targetVarName].std() +
                        FeaturesDF_raw[targetVarName].mean()
                        for x in iterPredsList
                    ]
                else:
                    invertNormIterPreds = iterPredsList
                expanding_infectedDF.iloc[-len(iterPredsList):] = np.array(
                    invertNormIterPreds)
                expanding_infectedDF_shifted = getShifts(
                    expanding_infectedDF, Settings['lags'])

                # The Scenario setting selects which exogenous inputs
                # (lags / weather / mobility) feed the next prediction.
                if Settings["Scenario"] <= 1:
                    inputDataList = [invertNormIterPreds[-1]]
                    for elem in expanding_infectedDF_shifted.iloc[-1]:
                        inputDataList.append(elem)
                    for elem in knownWeather:
                        inputDataList.append(elem)
                    inputDataList.append(knownMobility)
                elif Settings["Scenario"] in [2, 3]:
                    inputDataList = [invertNormIterPreds[-1]]
                    for elem in expanding_infectedDF_shifted.iloc[-1]:
                        inputDataList.append(elem)
                elif Settings["Scenario"] == 4:
                    inputDataList = [invertNormIterPreds[-1]]
                    inputDataList.append(knownMobility)
                elif Settings["Scenario"] == 5:
                    inputDataList = [invertNormIterPreds[-1]]
                    for elem in knownWeather:
                        inputDataList.append(elem)

                inputDataList_rep.append([newDate, str(inputDataList)])

                # Re-normalise the assembled input point column-by-column.
                if Settings["Normalise"] == "Yes":
                    for colCount in range(len(FeaturesDF_raw.columns)):
                        inputDataList[colCount] = (
                            inputDataList[colCount] -
                            FeaturesDF_raw.iloc[:, colCount].mean()
                        ) / FeaturesDF_raw.iloc[:, colCount].std()
                if Settings["Regressor"] == "NN":
                    inputPointArray = np.array(inputDataList)
                    iterPred = model.predict(inputPointArray.reshape(1, -1))[0]
                    iterPred_std = 0
                else:
                    inputPointArray = np.array(inputDataList)
                    iterPred, iterPred_std = model.predict(
                        inputPointArray.reshape(1, -1), return_std=True)
                    iterPred = iterPred[0][0]
                    iterPred_std = iterPred_std[0]

                iterPredsList.append(iterPred)
                iterPreds_Std_List.append(iterPred_std)

        # Prefix both lists with the last known index, then undo the
        # normalisation on the predictions (inverse z-score).
        iterPredsList.insert(0, FeaturesDF_raw.index[-1])
        iterPreds_Std_List.insert(0, FeaturesDF_raw.index[-1])
        if Settings["Normalise"] == "Yes":
            iterPredsList[1:] = [
                x * FeaturesDF_raw[targetVarName].std() +
                FeaturesDF_raw[targetVarName].mean() for x in iterPredsList[1:]
            ]
            iterPreds_Std_List[1:] = [
                x * FeaturesDF_raw[targetVarName].std()
                for x in iterPreds_Std_List[1:]
            ]

        # Diagnostic dump for selected regions under scenario 1.
        if (Settings["Scenario"] == 1) & (RegionName
                                          in ["Campania", "Lombardia"]):
            pd.concat([expanding_infectedDF, infectedDF.loc[expanding_infectedDF.index], allWeatherDF.loc[expanding_infectedDF.index], mobility_df.loc[expanding_infectedDF.index], pd.DataFrame(inputDataList_rep, columns=['data', 'inputs']).set_index('data', )], axis=1)\
                .to_excel(modelDataPath+str(Settings["Scenario"])+RegionName+"_expanding_infectedDF.xlsx")

        return [iterPredsList, iterPreds_Std_List]
Example #3
0
    def perform_fit(self,
                    amp,
                    pixel_pos,
                    training_library,
                    max_fitpoints=None,
                    nodes=(64, 64, 64, 64, 64, 64, 64, 64, 64)):
        """
        Fit a per-pixel regression model to the template amplitudes.

        :param amp: ndarray
            Pixel amplitudes
        :param pixel_pos: ndarray
            Pixel XY coordinates; transposed on entry to shape (N, 2)
        :param training_library: str
            Backend to use: "sklearn", "KNN" or "keras"
        :param max_fitpoints: int
            Maximum number of points to include in the fit
        :param nodes: tuple
            Hidden-layer node layout of the MLP
        :return: the fitted model
        """
        pixel_pos = pixel_pos.T

        # When capped, keep a uniformly-random subset of the points.
        if max_fitpoints is not None and amp.shape[0] > max_fitpoints:
            order = np.arange(amp.shape[0])
            np.random.shuffle(order)
            keep = order[:max_fitpoints]
            amp = amp[keep]
            pixel_pos = pixel_pos[keep]

        if self.verbose:
            print("Fitting template using", training_library, "with",
                  amp.shape[0], "total pixels")

        # A deep stack of layers is needed to get this fit right.
        if training_library == "sklearn":
            from sklearn.neural_network import MLPRegressor

            model = MLPRegressor(hidden_layer_sizes=nodes,
                                 activation="relu",
                                 max_iter=1000,
                                 tol=0,
                                 early_stopping=True,
                                 verbose=False,
                                 n_iter_no_change=10)

            # Enforce up/down symmetry: train on every point mirrored in y,
            # duplicating its amplitude.
            upper = np.array([pixel_pos.T[0], np.abs(pixel_pos.T[1])]).T
            lower = np.array([pixel_pos.T[0], -1 * np.abs(pixel_pos.T[1])]).T
            model.fit(np.concatenate((upper, lower)),
                      np.concatenate((amp, amp)))

        elif training_library == "KNN":
            from sklearn.neighbors import KNeighborsRegressor

            model = KNeighborsRegressor(50)
            model.fit(pixel_pos, amp)

        elif training_library == "keras":
            import keras
            from keras.layers import Dense
            from keras.models import Sequential

            # Dense relu stack with a linear scalar output.
            model = Sequential()
            model.add(Dense(nodes[0], activation="relu", input_shape=(2, )))
            for width in nodes[1:]:
                model.add(Dense(width, activation="relu"))
            model.add(Dense(1, activation='linear'))
            model.compile(loss='mse', optimizer="adam", metrics=['accuracy'])

            # Stop once the validation loss plateaus.
            early_stop = keras.callbacks.EarlyStopping(monitor='val_loss',
                                                       min_delta=0.0,
                                                       patience=50,
                                                       verbose=2,
                                                       mode='auto')
            model.fit(pixel_pos,
                      amp,
                      epochs=10000,
                      batch_size=50000,
                      callbacks=[early_stop],
                      validation_split=0.1,
                      verbose=0)

        return model
Example #4
0
File: test.py  Project: h83s/load_forecast
 
 
# Reshape the hourly load series into a day matrix: 365 rows, one column per
# hour of the day. (``load_h`` is defined earlier in the file, outside this view.)
load_dh=np.reshape(load_h,(365,-1),order='C')

# The sole input feature is the day index 0..364.
X=np.arange(0,np.size(load_dh, axis=0))


# Fix the RNG seed for reproducible weight initialisation / shuffling.
seed=0
np.random.seed(seed)


#create model
print('creating model')

# Two-layer MLP: 1 input (day index) -> 100 relu units -> 24 sigmoid outputs
# (one per hour of the day), trained on MSE with Adam.
# NOTE(review): ``init=`` and ``nb_epoch=`` below are Keras 1.x spellings;
# Keras 2+ renamed them to ``kernel_initializer=`` and ``epochs=`` — confirm
# which version this project pins before running.
model = Sequential()
model.add(Dense(100, input_dim=1, init='uniform', activation='relu'))
model.add(Dense(24, init='uniform', activation='sigmoid'))
model.compile(optimizer='adam', loss='mean_squared_error', metrics=['mean_squared_error'])

# training
print('Training')

model.fit(X, load_dh, batch_size=10, nb_epoch=10000, verbose=2, validation_split=0.3, shuffle=True)

# Evaluate on the training data and report the tracked MSE metric.
scores = model.evaluate(X, load_dh)
print("%s: %.2f%%" % (model.metrics_names[1], scores[1]))



# Multilayer Perceptron to Predict International Airline Passengers (t+1, given t, t-1, t-2)
import numpy
    def perform_fit(self, amp, pixel_pos, training_library, max_fitpoints=None,
                    nodes=(64, 64, 64, 64, 64, 64, 64, 64, 64)):
        """
        Fit MLP model to individual template pixels

        :param amp: ndarray
            Pixel amplitudes
        :param pixel_pos: ndarray
            Pixel XY coordinate format (N, 2)
        :param training_library: str
            One of "sklearn", "kde", "KNN", "loess" or "keras"
        :param max_fitpoints: int
            Maximum number of points to include in MLP fit
        :param nodes: tuple
            Node layout of MLP
        :return: MLP
            Fitted MLP model (an interpolator for "kde"/"loess")
        """
        pixel_pos = pixel_pos.T

        # If we put a limit on this then randomly choose points
        if max_fitpoints is not None and amp.shape[0] > max_fitpoints:
            indices = np.arange(amp.shape[0])
            np.random.shuffle(indices)
            amp = amp[indices[:max_fitpoints]]
            pixel_pos = pixel_pos[indices[:max_fitpoints]]

        if self.verbose:
            print("Fitting template using", training_library, "with", amp.shape[0],
                  "total pixels")

        # We need a large number of layers to get this fit right
        if training_library == "sklearn":
            from sklearn.neural_network import MLPRegressor

            model = MLPRegressor(hidden_layer_sizes=nodes, activation="relu",
                                 max_iter=1000, tol=0,
                                 early_stopping=True, verbose=False,
                                 n_iter_no_change=10)

            model.fit(pixel_pos, amp)
        elif training_library == "kde":
            from KDEpy import FFTKDE
            from scipy.interpolate import LinearNDInterpolator

            # Smooth the (x, y, amp) samples with a KDE evaluated on a
            # (bins[0] x bins[1] x 200) grid, then collapse the z axis into a
            # weighted-average amplitude per (x, y) cell.
            x, y = pixel_pos.T
            data = np.vstack((x, y, amp))
            kde = FFTKDE(bw=0.015).fit(data.T)
            points, out = kde.evaluate((self.bins[0], self.bins[1], 200))
            points_x, points_y, points_z = points.T

            av_val = np.sum((out*points_z).reshape((self.bins[0], self.bins[1], 200)), axis=-1) / \
                np.sum(out.reshape((self.bins[0], self.bins[1], 200)), axis=-1)

            # Every z-slice shares the same (x, y) grid, so slice 0 suffices.
            points_x = points_x.reshape((self.bins[0], self.bins[1], 200))[:, :, 0].ravel()
            points_y = points_y.reshape((self.bins[0], self.bins[1], 200))[:, :, 0].ravel()

            # FIX: reuse the stacked grid — the original built ``int_points``
            # and then rebuilt the identical array inline, leaving the first
            # unused.
            int_points = np.vstack((points_x, points_y)).T
            lin = LinearNDInterpolator(int_points, av_val.ravel(), fill_value=0)

            return lin

        elif training_library == "KNN":
            from sklearn.neighbors import KNeighborsRegressor

            model = KNeighborsRegressor(10)
            model.fit(pixel_pos, amp)

        elif training_library == "loess":
            from loess.loess_2d import loess_2d
            from scipy.interpolate import LinearNDInterpolator

            # Drop empty pixels, LOESS-smooth the rest, and wrap the smoothed
            # values in an interpolator over the kept positions.
            sel = amp != 0
            model = loess_2d(pixel_pos.T[0][sel], pixel_pos.T[1][sel], amp[sel],
                             degree=3, frac=0.005)
            lin = LinearNDInterpolator(pixel_pos[sel], model[0])
            return lin

        elif training_library == "keras":
            from keras.models import Sequential
            from keras.layers import Dense
            import keras

            model = Sequential()
            model.add(Dense(nodes[0], activation="relu", input_shape=(2,)))

            for n in nodes[1:]:
                model.add(Dense(n, activation="relu"))

            model.add(Dense(1, activation='linear'))
            # NOTE(review): 'accuracy' is uninformative for regression; kept
            # unchanged so logged metrics stay backward compatible.
            model.compile(loss='mean_absolute_error',
                          optimizer="adam", metrics=['accuracy'])
            stopping = keras.callbacks.EarlyStopping(monitor='val_loss',
                                                     min_delta=0.0,
                                                     patience=10,
                                                     verbose=2, mode='auto')

            model.fit(pixel_pos, amp, epochs=10000,
                      batch_size=100000,
                      callbacks=[stopping], validation_split=0.1, verbose=0)

        return model