Example #1
    # (Excerpt from a class; assumes numpy as np and sklearn.neural_network.MLPRegressor are imported.)
    def _create_new_nn(self, weights, biases):
        # Build an MLPRegressor with the desired architecture; max_iter=1 because the
        # fit on a single random sample below only initializes the internal structures.
        mlp = MLPRegressor(hidden_layer_sizes=self._nn_architecture, alpha=10**-10, max_iter=1)
        mlp.fit([np.random.randn(self._n_features)], [np.random.randn(self._n_actions)])
        # Overwrite the randomly initialized weights and biases with the given ones and
        # use a softmax output so the network yields a distribution over the actions.
        mlp.coefs_ = weights
        mlp.intercepts_ = biases
        mlp.out_activation_ = 'softmax'
        return mlp
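
A usage sketch may help put the shapes in context. Everything below (the agent object, its architecture of one 16-unit hidden layer, the 4 features and 2 actions) is an assumption for illustration, not part of the original example:

import numpy as np

# Hypothetical: 'agent' is an instance of the (unshown) class owning _create_new_nn,
# with self._nn_architecture == (16,), self._n_features == 4, self._n_actions == 2.
weights = [np.random.randn(4, 16), np.random.randn(16, 2)]  # (features x hidden), (hidden x actions)
biases = [np.random.randn(16), np.random.randn(2)]          # one bias vector per layer
mlp = agent._create_new_nn(weights, biases)
probs = mlp.predict([np.random.randn(4)])                   # softmax distribution over the 2 actions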
Example #2
    def initMlp(self, netParams):
        """
        initializes a MultiLayer Perceptron (MLP) Regressor with the desired network architecture (layers)
        and network parameters (weights and biases).
        :param netParams: a list of floats representing the network parameters (weights and biases) of the MLP
        :return: initialized MLP Regressor
        """

        # create the initial MLP:
        mlp = MLPRegressor(hidden_layer_sizes=(HIDDEN_LAYER, ), max_iter=1)

        # This fit call initializes the input and output layers and the node weights and biases;
        # we are not interested in actually training the MLP here, hence the setting max_iter=1 above
        mlp.fit(
            np.random.uniform(low=-1, high=1, size=INPUTS).reshape(1, -1),
            np.ones(OUTPUTS))

        # weights are represented as a list of 2 ndarrays:
        # - hidden layer weights: INPUTS x HIDDEN_LAYER
        # - output layer weights: HIDDEN_LAYER x OUTPUTS
        numWeights = INPUTS * HIDDEN_LAYER + HIDDEN_LAYER * OUTPUTS
        weights = np.array(netParams[:numWeights])
        mlp.coefs_ = [
            weights[0:INPUTS * HIDDEN_LAYER].reshape((INPUTS, HIDDEN_LAYER)),
            weights[INPUTS * HIDDEN_LAYER:].reshape((HIDDEN_LAYER, OUTPUTS))
        ]

        # biases are represented as a list of 2 ndarrays:
        # - hidden layer biases: HIDDEN_LAYER x 1
        # - output layer biases: OUTPUTS x 1
        biases = np.array(netParams[numWeights:])
        mlp.intercepts_ = [biases[:HIDDEN_LAYER], biases[HIDDEN_LAYER:]]

        return mlp
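
A hedged usage sketch follows; the constant values and the agent instance are assumptions, and the only real requirement is that netParams holds exactly INPUTS * HIDDEN_LAYER + HIDDEN_LAYER * OUTPUTS weights followed by HIDDEN_LAYER + OUTPUTS biases:

import numpy as np

# Hypothetical constants; the original defines INPUTS, HIDDEN_LAYER and OUTPUTS elsewhere.
INPUTS, HIDDEN_LAYER, OUTPUTS = 4, 10, 1
NUM_PARAMS = INPUTS * HIDDEN_LAYER + HIDDEN_LAYER * OUTPUTS + HIDDEN_LAYER + OUTPUTS

# 'agent' stands for an instance of the (unshown) class that owns initMlp.
netParams = np.random.uniform(low=-1, high=1, size=NUM_PARAMS).tolist()
mlp = agent.initMlp(netParams)
print(mlp.predict(np.random.uniform(low=-1, high=1, size=INPUTS).reshape(1, -1)))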
Example #3
import numpy as np
from sklearn.neural_network import MLPRegressor


def get_mlpn_predict(X, parameters, hidden_n):
    # Build a single-hidden-layer MLPRegressor; max_iter=1 because the fit on
    # random targets below is only needed to initialize the internal structures.
    model = MLPRegressor(hidden_layer_sizes=hidden_n,
                         activation='logistic',
                         max_iter=1)
    y = np.random.rand(X.shape[0])
    model.fit(X, y)
    # Unpack the flat parameter vector into the network's weights and biases:
    # W1 (n_features x hidden_n), b1 (hidden_n values), W2 (hidden_n x 1), b2 (1 value).
    # The bias arrays keep a leading length-1 axis, which broadcasts correctly in predict.
    weights1 = []
    weights2 = []
    bias1 = []
    bias2 = []
    for i in range(X.shape[1]):
        weights1.append(parameters[i * hidden_n:(i + 1) * hidden_n])
    p_index = X.shape[1] * hidden_n
    bias1.append(parameters[p_index:p_index + hidden_n])
    p_index = p_index + hidden_n
    for i in range(p_index, p_index + hidden_n):
        weights2.append([parameters[i]])
    p_index = p_index + hidden_n
    bias2.append(parameters[p_index:])
    weights1 = np.array(weights1)
    bias1 = np.array(bias1)
    weights2 = np.array(weights2)
    bias2 = np.array(bias2)
    weights = [weights1, weights2]
    bias = [bias1, bias2]
    # Overwrite the fitted weights and biases with the supplied parameters,
    # then predict with the resulting network.
    model.coefs_ = weights
    model.intercepts_ = bias
    predvalue = model.predict(X)
    return np.array(predvalue)
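
For illustration, a small call of the function above on random data; the parameter vector must hold X.shape[1] * hidden_n hidden weights, hidden_n hidden biases, hidden_n output weights and one output bias, in that order (the dataset sizes below are arbitrary assumptions):

X = np.random.rand(20, 3)                                   # 20 samples, 3 features
hidden_n = 5
n_params = X.shape[1] * hidden_n + hidden_n + hidden_n + 1  # W1, b1, W2, b2
parameters = np.random.uniform(-1, 1, n_params)

y_pred = get_mlpn_predict(X, parameters, hidden_n)
print(y_pred.shape)                                         # (20,)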
Example #4
from sklearn.neural_network import MLPRegressor


def deserialize_mlp_regressor(model_dict):
    model = MLPRegressor(**model_dict['params'])

    # Restore the fitted attributes that predict() relies on:
    model.coefs_ = model_dict['coefs_']
    model.loss_ = model_dict['loss_']
    model.intercepts_ = model_dict['intercepts_']
    model.n_iter_ = model_dict['n_iter_']
    model.n_layers_ = model_dict['n_layers_']
    model.n_outputs_ = model_dict['n_outputs_']
    model.out_activation_ = model_dict['out_activation_']

    return model
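
The matching serializer is not part of this example; a minimal sketch of what it could look like, collecting exactly the fields the deserializer above restores (the function name and the use of get_params() are assumptions):

def serialize_mlp_regressor(model):
    # Hypothetical counterpart to deserialize_mlp_regressor above.
    return {
        'params': model.get_params(),
        'coefs_': model.coefs_,
        'loss_': model.loss_,
        'intercepts_': model.intercepts_,
        'n_iter_': model.n_iter_,
        'n_layers_': model.n_layers_,
        'n_outputs_': model.n_outputs_,
        'out_activation_': model.out_activation_,
    }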
Example #5
          ],
          [
              1.53168374e+00, 1.84291014e+00, -2.24414250e-02, 3.46318652e-02,
              3.69355801e-75, 1.53155037e+00, 9.85110048e-51, -4.91456836e-54,
              -1.06266143e-47, 1.46674520e+00
          ]]),
    list([[7.61211153e-01], [5.29023058e-01], [-6.76783513e-01],
          [-1.23527535e-01], [1.04599422e-01], [1.06178562e+00],
          [-1.09977597e-43], [-1.22990539e-90], [-3.14851814e-21],
          [7.33380751e-01]])
]

net.intercepts_ = [
    list([
        1.44847648, 1.47542637, 0.51003163, 0.45278632, -0.0056204, 1.53020242,
        -0.23453891, -0.00187764, -0.21982535, 1.69397764
    ]),
    list([1.9355952])
]

net.n_outputs_ = 1
net.n_layers_ = 3
net.out_activation_ = "identity"


def dotProd(a, b):
    # Dot product of two equal-length sequences (range instead of xrange for Python 3).
    return sum(a[i] * b[i] for i in range(len(a)))
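
dotProd suggests reproducing the network's forward pass by hand. A sketch of how that could look for this one-hidden-layer net is below, assuming the default 'relu' hidden activation (the snippet does not show which hidden activation was used) together with the 'identity' output set above:

def manualPredict(x):
    # Hypothetical sketch, not part of the original snippet.
    nHidden = len(net.intercepts_[0])
    hidden = [max(0.0, dotProd(x, [row[j] for row in net.coefs_[0]]) + net.intercepts_[0][j])
              for j in range(nHidden)]
    return dotProd(hidden, [row[0] for row in net.coefs_[1]]) + net.intercepts_[1][0]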


def arrToTuple(arr):
    tupArr = [tuple(elem) for elem in arr]