Example #1
    def _create_models(self, X, y):
        if len(y.shape) == 1:
            n_outputs = 1
        else:
            raise ValueError(
                'Only scalar predictions are currently supported.')

        self.models_ = []

        if self.backend_ == 'sklearn':
            from sklearn.neural_network import MLPRegressor

            for model_idx in range(self.n_regressors_):
                model = MLPRegressor(
                    hidden_layer_sizes=self.layer_sizes_list_[model_idx],
                    activation=self.activations_[model_idx],
                    solver=self.solvers_[model_idx],
                    alpha=self.alphas_[model_idx],
                    batch_size=self.batch_sizes_[model_idx],
                    max_iter=self.max_iters_[model_idx],
                    momentum=self.momentums_[model_idx],
                    nesterovs_momentum=self.nesterovs_momentums_[model_idx],
                    verbose=self.verbose_,
                )
                self.models_.append(model)

        elif self.backend_ == 'keras':
            from keras import regularizers
            from keras.layers import Dense
            from keras.models import Sequential

            for model_idx in range(self.n_regressors_):
                hidden_layer_sizes = self.layer_sizes_list_[model_idx]

                model = Sequential()
                for layer_size in hidden_layer_sizes:
                    model.add(
                        Dense(layer_size,
                              kernel_initializer='normal',
                              activation=self.activations_[model_idx],
                              kernel_regularizer=regularizers.l2(0.01)))

                if self.loss_ == 'mse':
                    model.add(
                        Dense(1,
                              kernel_initializer='normal',
                              kernel_regularizer=regularizers.l2(0.01)))
                    model.compile(loss='mean_squared_error',
                                  optimizer=self.solvers_[model_idx])

                elif self.loss_ == 'gaussian_nll':
                    model.add(
                        Dense(2,
                              kernel_initializer='normal',
                              kernel_regularizer=regularizers.l2(0.01)))
                    model.compile(loss=gaussian_nll,
                                  optimizer=self.solvers_[model_idx])

                self.models_.append(model)
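
The gaussian_nll loss compiled above is not defined in this snippet. A minimal sketch of what it could look like, assuming the two output units of the final Dense layer encode the predicted mean and log-variance of a Gaussian:

# Hypothetical helper (not shown in the original snippet): negative
# log-likelihood of y_true under N(mean, exp(log_var)), reading the two
# output units as mean and log-variance.
from keras import backend as K

def gaussian_nll(y_true, y_pred):
    mean = y_pred[:, 0:1]      # first output unit: predicted mean
    log_var = y_pred[:, 1:2]   # second output unit: predicted log-variance
    return K.mean(0.5 * log_var +
                  0.5 * K.square(y_true - mean) / K.exp(log_var))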
    def RunIterativePredict(FeaturesDFin, currentStep):

        FeaturesDF = FeaturesDFin.copy()
        FeaturesDF_raw = FeaturesDF.copy()

        if Settings["Normalise"] == "Yes":
            for col in FeaturesDF.columns:
                FeaturesDF[col] = (
                    FeaturesDF[col] -
                    FeaturesDF_raw[col].mean()) / FeaturesDF_raw[col].std()

        if Settings["Regressor"] == "NN":
            nn_options = {  # options for neural network
                'hidden_layer_sizes': (2, 1),
                'solver': 'lbfgs',
                'activation': 'tanh',
                'max_iter': 1500,  # default 200
                'alpha': 0.001,  # default 0.0001
                'random_state': None  # default None
            }
            model = MLPRegressor(**nn_options)
        elif Settings["Regressor"] == "GPR":
            mainRolling_kernel = ConstantKernel() + Matern() + DotProduct(
            ) + WhiteKernel()  # + PairwiseKernel() + RBF() + ExpSineSquared()
            #mainRolling_kernel = 1**2*ConstantKernel() + 1**2*Matern() + 1**2*DotProduct() + 1**2* ExpSineSquared() + 1**2*WhiteKernel() # + PairwiseKernel() + RBF() + ExpSineSquared()
            model = GaussianProcessRegressor(
                kernel=mainRolling_kernel,
                random_state=0,
                n_restarts_optimizer=2)  #, normalize_y=True
        elif Settings["Regressor"] == "LSTM":
            model = Sequential()
            model.add(LSTM(7, input_shape=(1, FeaturesDF.shape[1])))
            model.add(Dense(1))
            model.compile(loss='mean_squared_error', optimizer='adam')
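            # Note: Keras LSTM layers expect 3-D input shaped
            # (samples, timesteps, features); the fit and predict inputs
            # below are reshaped accordingly.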

        iterPredsList = []
        iterPreds_Std_List = []

        if "SingleStepPredict" in Settings["Reporter"]:
            fitInputX = FeaturesDF.shift(1).bfill().values
            fitTargetY = FeaturesDF[targetVarName].values.reshape(-1, 1)

            if Settings["Regressor"] == "LSTM":
                fitInputX = fitInputX.reshape(
                    (fitInputX.shape[0], 1, FeaturesDF.shape[1]))
                for i in range(10):
                    model.fit(fitInputX,
                              fitTargetY,
                              epochs=1,
                              batch_size=5,
                              verbose=0,
                              shuffle=False)
                    model.reset_states()
            else:
                model.fit(fitInputX, fitTargetY)

            if Settings['Regressor'] in ["NN", "LSTM"]:
                firstPred = model.predict(FeaturesDF.iloc[-1].values.reshape(
                    1, -1))[0]
                firstPred_Std = 0
            elif Settings['Regressor'] == "GPR":
                firstPred, firstPred_Std = model.predict(
                    FeaturesDF.iloc[-1].values.reshape(1, -1), return_std=True)
                firstPred = firstPred[0][0]
                firstPred_Std = firstPred_Std[0]
            iterPredsList.append(firstPred)
            iterPreds_Std_List.append(firstPred_Std)

        elif "Iterative" in Settings["Reporter"]:
            "Iterative Predictions"
            inputDataList_rep = []
            for j in range(Settings["predictAhead"] - 1):
                if j == 0:
                    fitInputX = FeaturesDF.shift(1).bfill().values
                    fitTargetY = FeaturesDF[targetVarName].values.reshape(
                        -1, 1)

                    if Settings["Regressor"] == "LSTM":
                        fitInputX = np.reshape(
                            fitInputX,
                            (fitInputX.shape[0], 1, FeaturesDF.shape[1]))
                        for i in range(10):
                            model.fit(fitInputX,
                                      fitTargetY,
                                      epochs=1,
                                      batch_size=1,
                                      verbose=0,
                                      shuffle=False)
                            model.reset_states()
                    else:
                        model.fit(fitInputX, fitTargetY)

                    if Settings['Regressor'] in ["NN", "LSTM"]:
                        firstPred = model.predict(
                            FeaturesDF.iloc[-1].values.reshape(1, -1))[0]
                        firstPred_Std = 0
                    elif Settings['Regressor'] == "GPR":
                        firstPred, firstPred_Std = model.predict(
                            FeaturesDF.iloc[-1].values.reshape(1, -1),
                            return_std=True)
                        firstPred = firstPred[0][0]
                        firstPred_Std = firstPred_Std[0]
                    iterPredsList.append(firstPred)
                    iterPreds_Std_List.append(firstPred_Std)

                expanding_infectedDF = infectedDF.copy().iloc[:currentStep +
                                                              j + 1]
                newDate = expanding_infectedDF.index[-1]
                knownWeather = allWeatherDF.loc[newDate].values
                knownMobility = mobility_df.loc[newDate]
                if Settings["Normalise"] == "Yes":
                    invertNormIterPreds = [
                        x * FeaturesDF_raw[targetVarName].std() +
                        FeaturesDF_raw[targetVarName].mean()
                        for x in iterPredsList
                    ]
                else:
                    invertNormIterPreds = iterPredsList
                expanding_infectedDF.iloc[-len(iterPredsList):] = np.array(
                    invertNormIterPreds)
                expanding_infectedDF_shifted = getShifts(
                    expanding_infectedDF, Settings['lags'])

                if Settings["Scenario"] <= 1:
                    inputDataList = [invertNormIterPreds[-1]]
                    for elem in expanding_infectedDF_shifted.iloc[-1]:
                        inputDataList.append(elem)
                    for elem in knownWeather:
                        inputDataList.append(elem)
                    inputDataList.append(knownMobility)
                elif Settings["Scenario"] in [2, 3]:
                    inputDataList = [invertNormIterPreds[-1]]
                    for elem in expanding_infectedDF_shifted.iloc[-1]:
                        inputDataList.append(elem)
                elif Settings["Scenario"] == 4:
                    inputDataList = [invertNormIterPreds[-1]]
                    inputDataList.append(knownMobility)
                elif Settings["Scenario"] == 5:
                    inputDataList = [invertNormIterPreds[-1]]
                    for elem in knownWeather:
                        inputDataList.append(elem)

                inputDataList_rep.append([newDate, str(inputDataList)])

                if Settings["Normalise"] == "Yes":
                    for colCount in range(len(FeaturesDF_raw.columns)):
                        inputDataList[colCount] = (
                            inputDataList[colCount] -
                            FeaturesDF_raw.iloc[:, colCount].mean()
                        ) / FeaturesDF_raw.iloc[:, colCount].std()
                if Settings["Regressor"] == "NN":
                    inputPointArray = np.array(inputDataList)
                    iterPred = model.predict(inputPointArray.reshape(1, -1))[0]
                    iterPred_std = 0
                else:
                    inputPointArray = np.array(inputDataList)
                    iterPred, iterPred_std = model.predict(
                        inputPointArray.reshape(1, -1), return_std=True)
                    iterPred = iterPred[0][0]
                    iterPred_std = iterPred_std[0]

                iterPredsList.append(iterPred)
                iterPreds_Std_List.append(iterPred_std)

        iterPredsList.insert(0, FeaturesDF_raw.index[-1])
        iterPreds_Std_List.insert(0, FeaturesDF_raw.index[-1])
        if Settings["Normalise"] == "Yes":
            "standard normalisation"
            iterPredsList[1:] = [
                x * FeaturesDF_raw[targetVarName].std() +
                FeaturesDF_raw[targetVarName].mean() for x in iterPredsList[1:]
            ]
            iterPreds_Std_List[1:] = [
                x * FeaturesDF_raw[targetVarName].std()
                for x in iterPreds_Std_List[1:]
            ]

        if (Settings["Scenario"] == 1) & (RegionName
                                          in ["Campania", "Lombardia"]):
            pd.concat([expanding_infectedDF, infectedDF.loc[expanding_infectedDF.index], allWeatherDF.loc[expanding_infectedDF.index], mobility_df.loc[expanding_infectedDF.index], pd.DataFrame(inputDataList_rep, columns=['data', 'inputs']).set_index('data', )], axis=1)\
                .to_excel(modelDataPath+str(Settings["Scenario"])+RegionName+"_expanding_infectedDF.xlsx")

        return [iterPredsList, iterPreds_Std_List]
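
For reference, a hypothetical Settings dict covering the keys the function reads above (an illustration, not taken from the original project; the function also relies on module-level globals such as targetVarName, infectedDF, allWeatherDF and mobility_df):

# Assumed configuration; the keys mirror the lookups in RunIterativePredict.
Settings = {
    "Normalise": "Yes",        # "Yes" to z-score the features
    "Regressor": "GPR",        # one of "NN", "GPR", "LSTM"
    "Reporter": "Iterative",   # or a string containing "SingleStepPredict"
    "predictAhead": 7,         # iterative prediction horizon
    "lags": 3,                 # passed to getShifts
    "Scenario": 1,             # feature-set selector (1-5)
}
iterPreds, iterStds = RunIterativePredict(FeaturesDF, currentStep=100)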
Example #3
File: test.py  Project: h83s/load_forecast
load_dh = np.reshape(load_h, (365, -1), order='C')

X = np.arange(0, np.size(load_dh, axis=0))


seed = 0
np.random.seed(seed)


#create model
print('creating model')

model = Sequential()
model.add(Dense(100, input_dim=1, kernel_initializer='uniform', activation='relu'))
model.add(Dense(24, kernel_initializer='uniform', activation='sigmoid'))
model.compile(optimizer='adam', loss='mean_squared_error', metrics=['mean_squared_error'])
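# The net maps a scalar day index (input_dim=1) to a 24-value daily load
# profile; the 24 output units match the columns of load_dh (one row per day).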

# training
print('Training')

model.fit(X, load_dh, batch_size=10, epochs=10000, verbose=2, validation_split=0.3, shuffle=True)

scores = model.evaluate(X, load_dh)
print("%s: %.2f%%" % (model.metrics_names[1], scores[1]))



# Multilayer Perceptron to Predict International Airline Passengers (t+1, given t, t-1, t-2)
import numpy
import matplotlib.pyplot as plt
import pandas
Example #4
# Imports required by the class below (AutoencoderFactoryKeras is assumed to
# be defined elsewhere in the project).
import math
import sys

import numpy as np
import tensorflow as tf
from keras.callbacks import EarlyStopping
from keras.layers import Dense
from keras.models import Sequential
from keras.optimizers import SGD, Adam
from keras.regularizers import l2
from keras.wrappers.scikit_learn import KerasRegressor
from sklearn.model_selection import GridSearchCV
from sklearn.neural_network import MLPRegressor
from sklearn.utils import check_random_state


class Autoencoder:
    def __init__(self,
                 hidden_nodes,
                 weights_init=None,
                 weight_initializer='random_normal',
                 biases_init=None,
                 activation='sigmoid',
                 batch_size=1,
                 batch_norm=False,
                 learning_rate=1e-3,
                 momentum=0.9,
                 regularization=0,
                 optimizer='adam',
                 max_epochs=sys.maxsize,
                 convergence_criterion=(0, 10),
                 backend='keras'):

        if backend not in ['tensorflow', 'keras', 'sklearn']:
            raise ValueError("invalid backend: {}".format(backend))

        self._hidden_nodes = hidden_nodes
        self._weights_init = weights_init
        self._weight_initializer = weight_initializer
        self._biases_init = biases_init
        self._activation = activation
        self._batch_size = batch_size
        self._batch_norm = batch_norm
        self._learning_rate = learning_rate
        self._momentum = momentum
        self._regularization = regularization
        self._optimizer = optimizer
        self._max_epochs = max_epochs
        self._conv = convergence_criterion
        self._backend = backend

    @staticmethod
    def gridsearch(inputs,
                   batch_sizes,
                   learning_rates,
                   regularizations,
                   kwargs_model=None,
                   cv=10,
                   verbose=False):

        if kwargs_model is None:
            kwargs_model = {}

        backend = kwargs_model.get('backend', 'keras')

        if backend != 'keras':
            err = "gridsearch currently only implemented for keras backend"
            raise ValueError(err)

        build_fn = AutoencoderFactoryKeras(inputs, kwargs_model)

        epochs = kwargs_model.get('max_epochs', sys.maxsize)

        estimator = KerasRegressor(build_fn=build_fn, epochs=epochs, verbose=0)

        search = GridSearchCV(estimator=estimator,
                              scoring='neg_mean_absolute_error',
                              cv=cv,
                              param_grid={
                                  'bs': batch_sizes,
                                  'lr': learning_rates,
                                  'reg': regularizations
                              },
                              return_train_score=True,
                              error_score=np.nan,
                              n_jobs=1,
                              verbose=(51 if verbose else 0))

        conv = kwargs_model.get('convergence_criterion', (0, 10))

        callbacks = [
            EarlyStopping(monitor='loss', min_delta=conv[0], patience=conv[1])
        ]

        return search.fit(inputs, inputs, callbacks=callbacks)
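
    # A hypothetical call (data and grid values assumed, not from the
    # original source); the param_grid keys 'bs', 'lr' and 'reg' must match
    # the arguments accepted by AutoencoderFactoryKeras:
    #
    #   search = Autoencoder.gridsearch(inputs=X_train,
    #                                   batch_sizes=[16, 32],
    #                                   learning_rates=[1e-3, 1e-2],
    #                                   regularizations=[0.0, 1e-4],
    #                                   kwargs_model={'backend': 'keras'},
    #                                   cv=5)
    #   print(search.best_params_)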

    def train(self,
              inputs,
              inputs_val=None,
              epochs=None,
              learning_curve=False,
              verbose=False):

        kwargs = {
            'inputs_val': inputs_val,
            'epochs': epochs,
            'learning_curve': learning_curve,
            'verbose': verbose
        }

        if self._backend == 'tensorflow':
            self._init_model_tensorflow(inputs)
            return self._train_tensorflow(inputs, **kwargs)
        if self._backend == 'sklearn':
            self._init_model_sklearn(inputs)
            return self._train_sklearn(inputs, **kwargs)
        elif self._backend == 'keras':
            self._init_model_keras(inputs)
            return self._train_keras(inputs, **kwargs)

    def _init_model_tensorflow(self, inputs):
        if self._weights_init is not None or self._biases_init is not None:
            err = "custom weights currently not supported by tf backend"
            raise ValueError(err)

    def _train_tensorflow(self, inputs, inputs_val, epochs, learning_curve,
                          verbose):

        if inputs_val is not None:
            err = "tf backend currently does not support validation"
            raise ValueError(err)

        if learning_curve and learning_curve != 'mse':
            err = "tf backend currently only supports MSE"
            raise ValueError(err)

        # process dataset
        dataset = tf.data.Dataset.from_tensor_slices(inputs)
        dataset = dataset.shuffle(buffer_size=inputs.shape[0])
        dataset = dataset.batch(self._batch_size)
        dataset = dataset.repeat()

        dataset_it = dataset.make_one_shot_iterator()
        input_layer = dataset_it.get_next()

        # construct hidden and output layer
        layer_settings = {}

        layer_settings['activation'] = {
            'sigmoid': tf.nn.sigmoid,
            'relu': tf.nn.relu,
            'elu': tf.nn.elu
        }[self._activation]

        layer_settings['kernel_initializer'] = {
            'random_normal': tf.initializers.random_normal,
            'xavier': tf.contrib.layers.xavier_initializer(uniform=False),
            'he': tf.contrib.layers.variance_scaling_initializer()
        }[self._weight_initializer]

        if self._regularization > 0:
            layer_settings['kernel_regularizer'] = \
                tf.contrib.layers.l2_regularizer(self._regularization)

        def layer(prev, nodes):
            res = tf.layers.dense(prev, nodes, **layer_settings)

            if self._batch_norm:
                res = tf.layers.batch_normalization(res,
                                                    training=True,
                                                    momentum=0.9)

            return res

        hidden_layer = layer(input_layer, self._hidden_nodes)
        output_layer = layer(hidden_layer, inputs.shape[1])

        # set up optimization
        optimizer = {
            'momentum':
            tf.train.MomentumOptimizer(self._learning_rate, self._momentum),
            'momentum_nesterov':
            tf.train.MomentumOptimizer(self._learning_rate,
                                       self._momentum,
                                       use_nesterov=True),
            'adam':
            tf.train.AdamOptimizer(self._learning_rate)
        }[self._optimizer]

        loss = tf.reduce_mean(tf.square(output_layer - input_layer))
        training_op = optimizer.minimize(loss)

        # train model
        errors = []

        batches = inputs.shape[0] // self._batch_size

        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())

            if epochs is None:
                err = "early stopping currently not supported by tf backend"
                raise ValueError(err)

            for e in range(epochs):
                for _ in range(batches):
                    _, loss_ = sess.run([training_op, loss])

                # determine current error
                errors.append(loss_)

                if verbose:
                    self._show_progress(e, epochs)

        # return learning curve
        if learning_curve:
            epochs = list(range(1, len(errors) + 1))

            return epochs, errors

    def _init_model_keras(self, inputs):
        # construct network
        def initializer(weights):
            def res(shape, dtype=None):
                assert shape == res.weights.shape

                if dtype is not None:
                    weights = res.weights.astype(dtype)
                else:
                    weights = res.weights

                return weights

            res.weights = weights

            return res

        if self._weights_init is not None:
            kernel_init_hidden = initializer(self._weights_init[0])
            kernel_init_output = initializer(self._weights_init[1])
        else:
            kernel_init_hidden = kernel_init_output = {
                'random_normal': 'RandomNormal',
                'xavier': 'glorot_normal',
                'he': 'he_normal'
            }[self._weight_initializer]

        if self._biases_init is not None:
            bias_init_hidden = initializer(self._biases_init[0])
            bias_init_output = initializer(self._biases_init[1])
        else:
            bias_init_hidden = 'Zeros'
            bias_init_output = 'Zeros'

        hidden_layer = Dense(
            self._hidden_nodes,
            input_shape=(inputs.shape[1], ),
            activation=self._activation,
            kernel_initializer=kernel_init_hidden,
            bias_initializer=bias_init_hidden,
            kernel_regularizer=l2(self._regularization),
        )

        output_layer = Dense(inputs.shape[1],
                             activation=self._activation,
                             kernel_initializer=kernel_init_output,
                             bias_initializer=bias_init_output,
                             kernel_regularizer=l2(self._regularization))

        self._model = Sequential([hidden_layer, output_layer])

        # set up optimization
        opt = {
            'momentum':
            SGD(lr=self._learning_rate, momentum=self._momentum),
            'momentum_nesterov':
            SGD(lr=self._learning_rate, momentum=self._momentum,
                nesterov=True),
            'adam':
            Adam(lr=self._learning_rate)
        }[self._optimizer]

        self._model.compile(optimizer=opt, loss='mean_squared_error')

    def _train_keras(self, inputs, inputs_val, epochs, learning_curve,
                     verbose):

        # compute initial errors
        if learning_curve:
            errors = []

            if inputs_val is not None:
                errors_val = []

        # define convergence criterion
        if epochs is None:
            callbacks = [
                EarlyStopping(monitor='loss',
                              min_delta=self._conv[0],
                              patience=self._conv[1],
                              verbose=(1 if verbose else 0))
            ]

            epochs = self._max_epochs
        else:
            callbacks = []

        # set up validation set
        if inputs_val is not None:
            validation_data = (inputs_val, inputs_val)
        else:
            validation_data = None

        # train model
        for e in range(epochs):
            h = self._model.fit(inputs,
                                inputs,
                                validation_data=validation_data,
                                batch_size=self._batch_size,
                                epochs=(e + 1),
                                initial_epoch=e,
                                callbacks=callbacks,
                                verbose=0)

            # determine current error
            if learning_curve:
                if learning_curve == 'mse':
                    errors.append(h.history['loss'][0])

                    if inputs_val is not None:
                        errors_val.append(h.history['val_loss'][0])

                elif learning_curve == 'total':
                    errors.append(self.error(inputs))

                    if inputs_val is not None:
                        errors_val.append(self.error(inputs_val))

            # show progress
            if verbose:
                self._show_progress(e, epochs)

        # return learning curve
        if learning_curve:
            epochs = list(range(1, len(errors) + 1))

            if inputs_val is not None:
                return epochs, errors, errors_val
            else:
                return epochs, errors

    def _init_model_sklearn(self, inputs):
        self._model = MLPRegressor(
            # structure
            hidden_layer_sizes=(self._hidden_nodes, ),
            # activation functions
            activation='logistic',
            # solver
            solver='sgd',
            warm_start=True,
            # batch size
            batch_size=self._batch_size,
            # learning rate
            learning_rate='constant',
            learning_rate_init=self._learning_rate,
            # momentum
            momentum=self._momentum,
            nesterovs_momentum=True,
            # regularization
            alpha=self._regularization,
            # convergence
            max_iter=self._max_epochs,
            tol=self._conv[0],
            n_iter_no_change=self._conv[1])

    def _train_sklearn(self, inputs, inputs_val, epochs, learning_curve,
                       verbose):

        if learning_curve and learning_curve != 'total':
            err = "sklearn backend currently only supports total error"
            raise ValueError(err)

        # initialize weights and biases
        if self._weights_init is None:
            self._weights_init = [
                np.random.randn(inputs.shape[1], self._hidden_nodes),
                np.random.randn(self._hidden_nodes, inputs.shape[1])
            ]

        if self._biases_init is None:
            self._biases_init = [
                np.zeros(self._hidden_nodes),
                np.zeros(inputs.shape[1])
            ]

        # initialize learning curve
        if learning_curve:
            total_errors = []

            if inputs_val is not None:
                total_errors_val = []

        # convergence tracking (needed whether or not a curve is recorded)
        best_total_error = math.inf
        dead_epochs = 0

        epoch = 0
        while True:
            if epoch == 0:
                # hack ahead, scikit learn's awful MLPRegressor interface
                # ordinarily does not allow manual weight initialization

                self._model.n_outputs_ = inputs.shape[1]

                self._model._random_state = check_random_state(
                    self._model.random_state)

                self._model._initialize(
                    inputs,
                    [inputs.shape[1], self._hidden_nodes, inputs.shape[1]])

                self._model.coefs_ = self._weights_init
                self._model.intercepts_ = self._biases_init

                epoch += 1
                continue
            else:
                self._model = self._model.partial_fit(inputs, inputs)

            # determine current error
            total_error = self.error(inputs)

            if learning_curve:
                total_errors.append(total_error)

                if inputs_val is not None:
                    total_errors_val.append(self.error(inputs_val))

            # show progress
            if verbose:
                self._show_progress(epoch, epochs)

            # check for convergence
            epoch += 1

            if epochs is None:
                if total_error >= best_total_error - self._conv[0]:
                    dead_epochs += 1
                    if dead_epochs == self._conv[1]:
                        break

                if total_error < best_total_error:
                    best_total_error = min(best_total_error, total_error)
                    dead_epochs = 0
            else:
                if epoch > epochs:
                    break

        # return learning curve
        if learning_curve:
            total_epochs = list(range(1, len(total_errors) + 1))

            if inputs_val is not None:
                return total_epochs, total_errors, total_errors_val
            else:
                return total_epochs, total_errors

    def predict(self, i):
        return self._model.predict(i.reshape(1, len(i)))

    def error(self, inputs):
        total_error = 0
        for i in inputs:
            pred = self.predict(i)
            total_error += np.mean(np.abs(pred - i))

        return total_error

    @staticmethod
    def _show_progress(e, epochs):
        if epochs is None:
            print("\repoch {}".format(e))

        else:
            bar = '=' * int(50 * (e + 1) / epochs)
            progress = "[{:<50}] epoch {}/{}".format(bar, e + 1, epochs)

            print("\r" + progress, end='')
Example #5
    def perform_fit(self,
                    amp,
                    pixel_pos,
                    training_library,
                    max_fitpoints=None,
                    nodes=(64, 64, 64, 64, 64, 64, 64, 64, 64)):
        """
        Fit MLP model to individual template pixels

        :param amp: ndarray
            Pixel amplitudes
        :param pixel_pos: ndarray
            Pixel XY coordinate format (N, 2)
        :param training_library: str
            Library used to perform the fit ("sklearn", "KNN" or "keras")
        :param max_fitpoints: int
            Maximum number of points to include in MLP fit
        :param nodes: tuple
            Node layout of MLP
        :return: MLP
            Fitted MLP model
        """
        pixel_pos = pixel_pos.T

        # If we put a limit on this then randomly choose points
        if max_fitpoints is not None and amp.shape[0] > max_fitpoints:
            indices = np.arange(amp.shape[0])
            np.random.shuffle(indices)
            amp = amp[indices[:max_fitpoints]]
            pixel_pos = pixel_pos[indices[:max_fitpoints]]

        if self.verbose:
            print("Fitting template using", training_library, "with",
                  amp.shape[0], "total pixels")
        # We need a large number of layers to get this fit right
        if training_library == "sklearn":
            from sklearn.neural_network import MLPRegressor

            model = MLPRegressor(hidden_layer_sizes=nodes,
                                 activation="relu",
                                 max_iter=1000,
                                 tol=0,
                                 early_stopping=True,
                                 verbose=False,
                                 n_iter_no_change=10)

            # Mirror each point about the x-axis (y -> |y| and -|y|) so the
            # fitted template is symmetric in y.
            pixel_pos = np.array([pixel_pos.T[0], np.abs(pixel_pos.T[1])]).T
            pixel_pos_neg = np.array(
                [pixel_pos.T[0], -1 * np.abs(pixel_pos.T[1])]).T

            pixel_pos = np.concatenate((pixel_pos, pixel_pos_neg))
            amp = np.concatenate((amp, amp))
            model.fit(pixel_pos, amp)

        elif training_library == "KNN":
            from sklearn.neighbors import KNeighborsRegressor

            model = KNeighborsRegressor(50)
            model.fit(pixel_pos, amp)

        elif training_library == "keras":
            from keras.models import Sequential
            from keras.layers import Dense
            import keras

            model = Sequential()
            model.add(Dense(nodes[0], activation="relu", input_shape=(2, )))

            for n in nodes[1:]:
                model.add(Dense(n, activation="relu"))

            model.add(Dense(1, activation='linear'))
            model.compile(loss='mse', optimizer="adam", metrics=['accuracy'])
            stopping = keras.callbacks.EarlyStopping(monitor='val_loss',
                                                     min_delta=0.0,
                                                     patience=50,
                                                     verbose=2,
                                                     mode='auto')

            model.fit(pixel_pos,
                      amp,
                      epochs=10000,
                      batch_size=50000,
                      callbacks=[stopping],
                      validation_split=0.1,
                      verbose=0)

        return model
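
    # Hypothetical invocation of the method above (amp and pixel_pos are
    # assumed arrays; note the method transposes pixel_pos internally):
    #
    #   model = self.perform_fit(amp, pixel_pos,
    #                            training_library="sklearn",
    #                            max_fitpoints=200000)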
    def perform_fit(self, amp, pixel_pos, training_library, max_fitpoints=None,
                    nodes=(64, 64, 64, 64, 64, 64, 64, 64, 64)):
        """
        Fit MLP model to individual template pixels

        :param amp: ndarray
            Pixel amplitudes
        :param pixel_pos: ndarray
            Pixel XY coordinate format (N, 2)
        :param training_library: str
            Library used to perform the fit
            ("sklearn", "kde", "KNN", "loess" or "keras")
        :param max_fitpoints: int
            Maximum number of points to include in MLP fit
        :param nodes: tuple
            Node layout of MLP
        :return: MLP
            Fitted MLP model
        """
        pixel_pos = pixel_pos.T

        # If we put a limit on this then randomly choose points
        if max_fitpoints is not None and amp.shape[0] > max_fitpoints:
            indices = np.arange(amp.shape[0])
            np.random.shuffle(indices)
            amp = amp[indices[:max_fitpoints]]
            pixel_pos = pixel_pos[indices[:max_fitpoints]]

        if self.verbose:
            print("Fitting template using", training_library, "with", amp.shape[0],
                  "total pixels")

        # We need a large number of layers to get this fit right
        if training_library == "sklearn":
            from sklearn.neural_network import MLPRegressor

            model = MLPRegressor(hidden_layer_sizes=nodes, activation="relu",
                                 max_iter=1000, tol=0,
                                 early_stopping=True, verbose=False,
                                 n_iter_no_change=10)

            model.fit(pixel_pos, amp)
        elif training_library == "kde":
            from KDEpy import FFTKDE
            from scipy.interpolate import LinearNDInterpolator

            x, y = pixel_pos.T
            data = np.vstack((x, y, amp))
            kde = FFTKDE(bw=0.015).fit(data.T)
            points, out = kde.evaluate((self.bins[0], self.bins[1], 200))
            points_x, points_y, points_z = points.T

            av_z = np.average(points_z)
            av_val = np.sum((out*points_z).reshape((self.bins[0], self.bins[1], 200)), axis=-1) / \
                np.sum(out.reshape((self.bins[0], self.bins[1], 200)), axis=-1)

            points_x = points_x.reshape((self.bins[0], self.bins[1], 200))[:, :, 0].ravel()
            points_y = points_y.reshape((self.bins[0], self.bins[1], 200))[:, :, 0].ravel()

            int_points = np.vstack((points_x, points_y)).T
            lin = LinearNDInterpolator(int_points, av_val.ravel(), fill_value=0)

            return lin

        elif training_library == "KNN":
            from sklearn.neighbors import KNeighborsRegressor

            model = KNeighborsRegressor(10)
            model.fit(pixel_pos, amp)

        elif training_library == "loess":
            from loess.loess_2d import loess_2d
            from scipy.interpolate import LinearNDInterpolator
            sel = amp != 0
            model = loess_2d(pixel_pos.T[0][sel], pixel_pos.T[1][sel], amp[sel],
                             degree=3, frac=0.005)
            lin = LinearNDInterpolator(pixel_pos[sel], model[0])
            return lin

        elif training_library == "keras":
            from keras.models import Sequential
            from keras.layers import Dense
            import keras
            
            model = Sequential()
            model.add(Dense(nodes[0], activation="relu", input_shape=(2,)))

            for n in nodes[1:]:
                model.add(Dense(n, activation="relu"))

            model.add(Dense(1, activation='linear'))
            model.compile(loss='mean_absolute_error',
                          optimizer="adam", metrics=['accuracy'])
            stopping = keras.callbacks.EarlyStopping(monitor='val_loss',
                                                     min_delta=0.0,
                                                     patience=10,
                                                     verbose=2, mode='auto')
            
#            pixel_pos_neg = np.array([pixel_pos.T[0], -1 * np.abs(pixel_pos.T[1])]).T
        
#            pixel_pos = np.concatenate((pixel_pos, pixel_pos_neg))
#            amp = np.concatenate((amp, amp))
        
            model.fit(pixel_pos, amp, epochs=10000,
                      batch_size=100000,
                      callbacks=[stopping], validation_split=0.1, verbose=0)

        return model