Example #1
def _create_new_nn(self, weights, biases):
    # Fit once on a random sample so scikit-learn allocates coefs_ and
    # intercepts_ with the right shapes, then overwrite them.
    mlp = MLPRegressor(hidden_layer_sizes=self._nn_architecture,
                       alpha=1e-10, max_iter=1)
    mlp.fit([np.random.randn(self._n_features)],
            [np.random.randn(self._n_actions)])
    mlp.coefs_ = weights
    mlp.intercepts_ = biases
    mlp.out_activation_ = 'softmax'
    return mlp
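
The helper above uses a common trick: a single fit() call on one random sample makes scikit-learn allocate coefs_ and intercepts_ with the right shapes, after which they can be overwritten. A minimal standalone sketch of the same pattern (sizes and names here are illustrative, not from the original class):

import numpy as np
from sklearn.neural_network import MLPRegressor

n_features, n_actions = 4, 2  # illustrative sizes
mlp = MLPRegressor(hidden_layer_sizes=(8,), max_iter=1)
mlp.fit([np.random.randn(n_features)], [np.random.randn(n_actions)])

# Replacements must match the fitted shapes: (4, 8) and (8, 2) here.
mlp.coefs_ = [np.random.randn(n_features, 8), np.random.randn(8, n_actions)]
mlp.intercepts_ = [np.random.randn(8), np.random.randn(n_actions)]
print(mlp.predict([np.random.randn(n_features)]))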
Example #2
def contructANN(input_size, hidden_layers=[16, 16]):
    ann = MLPRegressor(hidden_layer_sizes=[input_size] + hidden_layers + [1],
                       activation='logistic')
    # Skip fit(): seed the private RNG and call scikit-learn's private
    # _initialize to allocate the weights directly.
    ann._random_state = np.random.RandomState(np.random.randint(2**32))
    ann._initialize(np.empty((1, 1)), [input_size] + hidden_layers + [1])
    ann.out_activation_ = 'logistic'
    return ann
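
Note that _random_state and _initialize are private scikit-learn APIs whose details have changed across releases, so this construction can break on upgrade. A quick shape check is a cheap guard; a sketch, assuming the helper above ran under a compatible scikit-learn version:

ann = contructANN(input_size=4)
# One weight matrix per consecutive pair of layers, one bias vector per
# non-input layer.
print([w.shape for w in ann.coefs_])
print([b.shape for b in ann.intercepts_])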
Example #3
def _create_first_population(self):
    self._current_population = []
    for _ in range(self._n_individuals):
        # Dummy fit on one random sample to allocate the weight matrices.
        mlp = MLPRegressor(hidden_layer_sizes=self._nn_architecture,
                           alpha=1e-10, max_iter=1)
        mlp.fit([np.random.randn(self._n_features)],
                [np.random.randn(self._n_actions)])
        mlp.out_activation_ = 'softmax'
        self._current_population.append([mlp, 0])
Example #4
def deserialize_mlp_regressor(model_dict):
    # Rebuild the estimator from its constructor params, then restore the
    # fitted attributes that predict() relies on.
    model = MLPRegressor(**model_dict['params'])

    model.coefs_ = model_dict['coefs_']
    model.loss_ = model_dict['loss_']
    model.intercepts_ = model_dict['intercepts_']
    model.n_iter_ = model_dict['n_iter_']
    model.n_layers_ = model_dict['n_layers_']
    model.n_outputs_ = model_dict['n_outputs_']
    model.out_activation_ = model_dict['out_activation_']

    return model
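
A matching serializer makes clear what model_dict must contain. The original source only shows the deserializer, so the following counterpart is hypothetical; it gathers the constructor params via get_params() plus the fitted attributes restored above:

def serialize_mlp_regressor(model):
    # Hypothetical inverse of deserialize_mlp_regressor.
    return {
        'params': model.get_params(),
        'coefs_': model.coefs_,
        'loss_': model.loss_,
        'intercepts_': model.intercepts_,
        'n_iter_': model.n_iter_,
        'n_layers_': model.n_layers_,
        'n_outputs_': model.n_outputs_,
        'out_activation_': model.out_activation_,
    }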
Example #5
                           power_t=0.5,
                           max_iter=1000,
                           shuffle=True,
                           random_state=None,
                           tol=0.0001,
                           verbose=False,
                           warm_start=False,
                           momentum=0.9,
                           nesterovs_momentum=True,
                           early_stopping=False,
                           validation_fraction=0.1,
                           beta_1=0.9,
                           beta_2=0.999,
                           epsilon=1e-08)

        nnr.out_activation_ = 'relu'  # set the activation function for the output layer

        nnr.fit(X, y)  # train the network

        # Input patterns for the test phase (final 1/3 of the data set).
        Xq = np.asarray(l)[npoints_train:, 0:5]
        photoz = nnr.predict(Xq)  # run the trained network on the test set for training i

        # Desired outputs for comparison in the test phase.
        Zq = np.asarray(l)[npoints_train:, 5]

        sigma.append(sigmaNMAD(photoz, Zq))
Example #6
t = time.process_time()
predictions = knn_reg.predict(X_test)
elapsed_test = time.process_time() - t

rmsle_knn = np.sqrt(mean_squared_log_error(y_test, predictions))
mae_knn = mean_absolute_error(y_test, predictions)
r2 = r2_score(y_test, predictions, multioutput='variance_weighted')
# Adjusted R-squared: 1 - (1 - R^2) * (n - 1) / (n - p - 1), p = #predictors.
adj_r2 = 1 - ((1 - r2) * ((len(y_test) - 1) / (len(y_test) - len(X_test.columns) - 1)))
r2_knn = r2
adj_r2_knn = adj_r2
'''

t = time.process_time()
nn = MLPRegressor(hidden_layer_sizes=(2, ),
                  activation='relu').fit(X_train, y_train)
nn.out_activation_ = 'relu'
elapsed_train = time.process_time() - t
t = time.process_time()
predictions = nn.predict(X_test)
elapsed_test = time.process_time() - t

rmsle_nn = np.sqrt(mean_squared_log_error(y_test, predictions))
mae_nn = mean_absolute_error(y_test, predictions)
r2 = r2_score(y_test, predictions, multioutput='variance_weighted')
adj_r2 = 1 - ((1 - r2) * ((len(y_test) - 1) /
                          (len(y_test) - len(X_test.columns) - 1)))
r2_nn = r2
adj_r2_nn = adj_r2
'''
print('training time: ', elapsed_train)
print('prediction time: ', elapsed_test)
Example #7
    def generate(self, seed=None, thread_num=None):
        """
        Generate a random structure based on the genes given in the output.

        Parameters
        ----------
        seed : A random seed for setting the weights.

        Returns
        -------
        A Parser class that can be passed to the structure class.
        """
        # we want to make sure our generated structure is good

        ann = self.parser.getGAParameters()['ann']
        random.seed(seed)
        np.random.seed(seed)

        if ann:
            clean_generation = False

            while not clean_generation:
                new_parser = copy.deepcopy(self.parser)

                ann_params = self.parser.getAnnParameters()
                ga_params = self.parser.getGAParameters()

                # Random network with one output per gene; its outputs set
                # the new gene values below.
                ann = MLPRegressor(
                    hidden_layer_sizes=tuple(ann_params['neurons']) +
                    (len(self.parser.getGenes()), ),
                    activation=ann_params['activation'])
                layers = [ann_params['neurons'][0]] + ann_params['neurons'] + [
                    len(self.parser.getGenes())
                ]
                input_vec = np.ones((1, ann_params['neurons'][0]))
                output_vec = np.empty((1, len(self.parser.getGenes())))
                ann._random_state = np.random.RandomState(seed)

                ann._initialize(output_vec, layers)
                ann.out_activation_ = ann_params['activation']

                new_parser.ann = ann

                outputs = new_parser.ann.predict(input_vec)

                old_config = self.parser.getConfig()
                new_config = new_parser.getConfig()
                for gene, output in zip(self.parser.getGenes(), outputs[0]):
                    val = getFromDict(old_config, gene['path'])
                    new_val = (gene['range'][1] -
                               gene['range'][0]) * output + gene['range'][0]
                    setInDict(new_config, gene['path'], new_val)
                    new_config, clean_generation = self.checkAndUpdate(
                        new_config, gene, val, new_val)

            new_parser.updateConfig(new_config)

            identifier = self.n_generated + thread_num
            history = [self.n_generated]
            s = Structure(new_parser, identifier, history)
            return s
        else:
            clean_generation = False

            while not clean_generation:
                new_parser = copy.deepcopy(self.parser)
                old_config = self.parser.getConfig()
                new_config = new_parser.getConfig()
                for gene in self.parser.getGenes():
                    val = getFromDict(old_config, gene['path'])
                    new_val = (gene['range'][1] - gene['range'][0]
                               ) * np.random.uniform() + gene['range'][0]
                    setInDict(new_config, gene['path'], new_val)
                    new_config, clean_generation = self.checkAndUpdate(
                        new_config, gene, val, new_val)

            new_parser.updateConfig(new_config)

            identifier = self.n_generated + thread_num
            s = Structure(new_parser, identifier, [])
            return s
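
Both branches above map a value u onto each gene's range with the same affine transform (for the network branch, u is only guaranteed to lie in [0, 1] when the activation is logistic). As a standalone sketch (function name hypothetical):

def scale_to_range(u, lo, hi):
    # Affine map of u in [0, 1] onto [lo, hi], as used for new_val above.
    return (hi - lo) * u + lo

assert scale_to_range(0.0, -1.0, 1.0) == -1.0
assert scale_to_range(0.5, -1.0, 1.0) == 0.0
assert scale_to_range(1.0, -1.0, 1.0) == 1.0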
Example #8
start_train = pd.Timestamp('2009-1-1', tz=df.index.tz)
end_train = pd.Timestamp('2012-1-1', tz=df.index.tz)
end_test = pd.Timestamp('2019-1-1', tz=df.index.tz)

X_train, y_train, index, data = util.gen_data(regions,
                                              start_train,
                                              end_train,
                                              weather=weather,
                                              holidays=holidays)

regressor = MLPRegressor(hidden_layer_sizes=(128, 32),
                         tol=4,
                         max_iter=300,
                         verbose=True,
                         random_state=rs)
# NOTE: fit() re-derives out_activation_ internally, so this assignment
# is overwritten when regressor.fit() runs below.
regressor.out_activation_ = 'relu'

scaler = StandardScaler()
X_scaled = scaler.fit_transform(X_train)
regressor.fit(X_scaled, y_train)

X_test, y_test, index, data = util.gen_data(regions,
                                            end_train,
                                            end_test,
                                            weather=weather,
                                            holidays=holidays)
X_test = scaler.transform(X_test)
predictions = regressor.predict(X_test)
index['pred'] = predictions
predictions = index.set_index(['date', 'region']).unstack()
predictions.columns = predictions.columns.droplevel()
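
The last three lines pivot the flat per-(date, region) predictions into a date-by-region table. A toy version of the same reshaping, with made-up data:

import pandas as pd

toy = pd.DataFrame({'date': ['d1', 'd1', 'd2', 'd2'],
                    'region': ['A', 'B', 'A', 'B'],
                    'pred': [1.0, 2.0, 3.0, 4.0]})
wide = toy.set_index(['date', 'region']).unstack()
wide.columns = wide.columns.droplevel()  # keep only the region level
print(wide)  # one row per date, one column per region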
Example #9
          [-1.23527535e-01], [1.04599422e-01], [1.06178562e+00],
          [-1.09977597e-43], [-1.22990539e-90], [-3.14851814e-21],
          [7.33380751e-01]])
]

net.intercepts_ = [
    list([
        1.44847648, 1.47542637, 0.51003163, 0.45278632, -0.0056204, 1.53020242,
        -0.23453891, -0.00187764, -0.21982535, 1.69397764
    ]),
    list([1.9355952])
]

net.n_outputs_ = 1
net.n_layers_ = 3
net.out_activation_ = "identity"


def dotProd(a, b):
    # Dot product of two equal-length sequences.
    return sum(a[i] * b[i] for i in range(len(a)))


def arrToTuple(arr):
    tupArr = [tuple(elem) for elem in arr]
    return tuple(tupArr)


def isEndState(state):
    return state[1] == 0
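
With coefs_, intercepts_, n_layers_ and out_activation_ assigned by hand as above, predict() runs an ordinary forward pass. The same computation written out manually, as a sketch (the hidden activation is assumed to be ReLU, scikit-learn's default, since the opening of the coefs_ assignment is truncated above):

import numpy as np

def manual_predict(x, coefs, intercepts):
    # One hidden layer (assumed ReLU) followed by an identity output,
    # matching n_layers_ = 3 and out_activation_ = "identity" above.
    h = np.maximum(0, np.dot(x, coefs[0]) + intercepts[0])
    return np.dot(h, coefs[1]) + intercepts[1]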

Example #10
    def test_regressor(X_train,
                       y_train,
                       X_test,
                       y_test,
                       nn_layers,
                       sk_hidden_layers,
                       input_activation,
                       output_activation,
                       alpha=0.0):

        if input_activation == "sigmoid":
            sk_input_activation = "logistic"
        else:
            sk_input_activation = input_activation

        if output_activation == "sigmoid":
            sk_output_activation = "logistic"
        else:
            sk_output_activation = output_activation

        mlp = MLPRegressor(
            solver='sgd',  # Stochastic gradient descent.
            activation=sk_input_activation,  # Skl name for sigmoid.
            alpha=alpha,  # No regularization for simplicity.
            hidden_layer_sizes=sk_hidden_layers)  # Full NN size is (1,3,3,1).

        mlp.out_activation_ = sk_output_activation

        # Force sklearn to set up all the necessary matrices by fitting a
        # data set. We don't care whether it converges, so ignore the
        # raised warnings.
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            mlp.fit(X_train, y_train)

        # =====================================================================
        n_samples, n_features = X_train.shape
        batch_size = n_samples
        hidden_layer_sizes = mlp.hidden_layer_sizes
        if not hasattr(hidden_layer_sizes, "__iter__"):
            hidden_layer_sizes = [hidden_layer_sizes]
        hidden_layer_sizes = list(hidden_layer_sizes)
        layer_units = ([n_features] + hidden_layer_sizes + [mlp.n_outputs_])
        activations = [X_test]
        activations.extend(
            np.empty((batch_size, n_fan_out)) for n_fan_out in layer_units[1:])
        deltas = [np.empty_like(a_layer) for a_layer in activations]
        coef_grads = [
            np.empty((n_fan_in_, n_fan_out_))
            for n_fan_in_, n_fan_out_ in zip(layer_units[:-1], layer_units[1:])
        ]
        intercept_grads = [
            np.empty(n_fan_out_) for n_fan_out_ in layer_units[1:]
        ]
        # =====================================================================

        # Re-apply the output activation: fit() resets out_activation_.
        mlp.out_activation_ = sk_output_activation
        activations = mlp._forward_pass(activations)
        loss, coef_grads, intercept_grads = mlp._backprop(
            X_test, y_test, activations, deltas, coef_grads, intercept_grads)

        # Activates my own MLP
        nn = MultilayerPerceptron(nn_layers,
                                  activation=input_activation,
                                  output_activation=output_activation,
                                  alpha=alpha)

        # Copy the weights and biases from the scikit-learn network to your
        # own.
        for i, w in enumerate(mlp.coefs_):
            nn.weights[i] = cp.deepcopy(w.T)
        for i, b in enumerate(mlp.intercepts_):
            nn.biases[i] = cp.deepcopy(b.T.reshape(-1, 1))

        # Call your own backpropagation function, and you're ready to compare
        # with the scikit-learn code.
        y_sklearn = mlp.predict(X_test)
        y = nn.predict(cp.deepcopy(X_test).T)

        # Asserts that the forward pass is correct
        assert np.allclose(y, y_sklearn), ("Prediction {} != {}".format(
            y, y_sklearn))

        delta_w, delta_b = nn._back_propagate(X_test.T, y_test)

        # Assert that the activations are correct in back propagation.
        for i, a in enumerate(nn.activations):
            print(i, a.T, activations[i])
            assert np.allclose(a.T,
                               activations[i]), "error in layer {}".format(i)
        else:
            print("Activations are correct.")

        # Assert that the bias derivatives are correct in back propagation.
        for i, derivative_bias in enumerate(delta_b):
            print(i, derivative_bias.T, intercept_grads[i])
            assert np.allclose(
                derivative_bias.T,
                intercept_grads[i]), ("error in layer {}".format(i))
        else:
            print("Biases derivatives are correct.")

        # Assert that the weight derivatives are correct in back propagation.
        for i, derivative_weight in enumerate(delta_w):
            print(i, derivative_weight.T, coef_grads[i])
            assert np.allclose(derivative_weight.T,
                               coef_grads[i]), "error in layer {}".format(i)
        else:
            print("Weight derivatives are correct.")

        print("Test complete\n")