예제 #1
0
def contructANN(input_size, hidden_layers=None):
    """Construct an unfitted MLPRegressor with randomly initialized weights.

    NOTE(review): the (misspelled) name is kept unchanged so existing
    callers are unaffected.

    Parameters
    ----------
    input_size : int
        Size of the input layer.
    hidden_layers : list of int, optional
        Sizes of the hidden layers; defaults to [16, 16].

    Returns
    -------
    MLPRegressor
        An initialized but unfitted regressor with a single logistic output.
    """
    # Use a None sentinel instead of a mutable default argument, which would
    # be shared across calls.
    if hidden_layers is None:
        hidden_layers = [16, 16]
    # Full layer layout (input + hidden + single output); the original code
    # passes the same list both as hidden_layer_sizes and to _initialize.
    layers = [input_size] + hidden_layers + [1]
    ann = MLPRegressor(hidden_layer_sizes=layers,
                       activation='logistic')
    # Draw the seed as uint32: a plain np.random.randint(2**32) raises
    # ValueError on platforms where the default randint dtype is int32
    # (e.g. Windows), since 2**32 - 1 overflows int32.
    ann._random_state = np.random.RandomState(
        np.random.randint(2**32, dtype=np.uint32))
    # _initialize is a private sklearn API: it allocates coefs_/intercepts_
    # without running fit(); the dummy y only supplies n_outputs via shape.
    ann._initialize(np.empty((1, 1)), layers)
    ann.out_activation_ = 'logistic'
    return ann
    def generate(self, seed=None, thread_num=None):
        """
        Generate a random structure based on the genes given in the output.

        If the parser's GA parameters enable an ANN, gene values are read
        from the outputs of a freshly initialized (unfitted) MLPRegressor;
        otherwise they are drawn uniformly at random. In both cases the
        candidate config is re-generated until ``checkAndUpdate`` reports a
        clean generation.

        Parameters
        ----------
        seed : int, optional
            A random seed for setting the weights (also seeds ``random``
            and ``numpy``).
        thread_num : int, optional
            Offset added to ``self.n_generated`` to form the structure id.

        Returns
        -------
        Structure
            A Structure wrapping a deep-copied parser with the new config;
            the parser can be passed to the structure class.
        """
        # we want to make sure our generated structure is good

        ann = self.parser.getGAParameters()['ann']
        random.seed(seed)
        np.random.seed(seed)

        if ann:
            clean_generation = False

            # Re-sample until checkAndUpdate accepts the candidate config.
            while not clean_generation:
                new_parser = copy.deepcopy(self.parser)

                ann_params = self.parser.getAnnParameters()
                # NOTE(review): ga_params is assigned but never used below.
                ga_params = self.parser.getGAParameters()

                # Unfitted MLP whose randomly initialized outputs (one per
                # gene) serve as the gene-value generator.
                ann = MLPRegressor(
                    hidden_layer_sizes=tuple(ann_params['neurons']) +
                    (len(self.parser.getGenes()), ),
                    activation=ann_params['activation'])
                # Layer layout: an input layer sized like the first hidden
                # layer, the hidden layers, then one output unit per gene.
                layers = [ann_params['neurons'][0]] + ann_params['neurons'] + [
                    len(self.parser.getGenes())
                ]
                input_vec = np.ones((1, ann_params['neurons'][0]))
                # output_vec only supplies the output dimension via its shape.
                output_vec = np.empty((1, len(self.parser.getGenes())))
                ann._random_state = np.random.RandomState(seed)

                # Private sklearn API: allocates random weights without fit().
                ann._initialize(output_vec, layers)
                ann.out_activation_ = ann_params['activation']

                new_parser.ann = ann

                # Constant all-ones input, so the outputs depend only on the
                # randomly initialized weights.
                outputs = new_parser.ann.predict(input_vec)

                old_config = self.parser.getConfig()
                new_config = new_parser.getConfig()
                for gene, output in zip(self.parser.getGenes(), outputs[0]):
                    val = getFromDict(old_config, gene['path'])
                    # Affine-map the raw network output into the gene's range.
                    new_val = (gene['range'][1] -
                               gene['range'][0]) * output + gene['range'][0]
                    setInDict(new_config, gene['path'], new_val)
                    # NOTE(review): clean_generation is overwritten for every
                    # gene, so only the LAST gene's verdict controls the retry
                    # loop — confirm this is intended.
                    new_config, clean_generation = self.checkAndUpdate(
                        new_config, gene, val, new_val)

            new_parser.updateConfig(new_config)

            # NOTE(review): raises TypeError when thread_num is None (the
            # default) — presumably callers always pass it; verify.
            identifier = self.n_generated + thread_num
            history = [self.n_generated]
            s = Structure(new_parser, identifier, history)
            return s
        else:
            clean_generation = False

            # Pure-uniform fallback when no ANN is configured; same retry
            # loop (and same last-gene-wins caveat) as the ANN branch.
            while not clean_generation:
                new_parser = copy.deepcopy(self.parser)
                old_config = self.parser.getConfig()
                new_config = new_parser.getConfig()
                for gene in self.parser.getGenes():
                    val = getFromDict(old_config, gene['path'])
                    # Uniform draw scaled into the gene's range.
                    new_val = (gene['range'][1] - gene['range'][0]
                               ) * np.random.uniform() + gene['range'][0]
                    setInDict(new_config, gene['path'], new_val)
                    new_config, clean_generation = self.checkAndUpdate(
                        new_config, gene, val, new_val)

            new_parser.updateConfig(new_config)

            identifier = self.n_generated + thread_num
            s = Structure(new_parser, identifier, [])
            return s
예제 #3
0
                     hidden_layer_sizes)

# Manual re-implementation of the setup phase of sklearn's MLP fit():
# derives the layer layout, seeds the RNG, initializes weights, and picks a
# batch size. Operates on an externally supplied mlp_estimator, X and y.
# X, y = mlp_estimator._validate_input(X, y, incremental)
n_samples, n_features = X.shape

# Ensure y is 2D
# TODO: guarantee the array is 2-D, i.e. the input y should be
# np.array([[1, 2, 3]]) — a 1-row, 3-column array.
# if y.ndim == 1:
#     y = y.reshape((-1, 1))

# Number of regression targets taken from y's second axis (assumes y is
# already 2-D — see the TODO above).
mlp_estimator.n_outputs_ = y.shape[1]

# Full layer layout: input features, hidden layers, then output units.
layer_units = ([n_features] + hidden_layer_sizes + [mlp_estimator.n_outputs_])

# check random state
mlp_estimator._random_state = check_random_state(mlp_estimator.random_state)

incremental = False
# (Re)initialize weights only for a fresh fit: skip when warm-starting an
# already-initialized estimator. With incremental hard-coded False, the
# second clause reduces to `not warm_start`.
if not hasattr(mlp_estimator, 'coefs_') or (not mlp_estimator.warm_start
                                            and not incremental):
    # First time training the model
    mlp_estimator._initialize(y, layer_units)

# lbfgs does not support mini-batches
if mlp_estimator.solver == 'lbfgs':
    batch_size = n_samples
elif mlp_estimator.batch_size == 'auto':
    # Same heuristic as sklearn: automatic batch size capped at 200 samples.
    batch_size = min(200, n_samples)
else:
    # Explicit batch size: warn when it is outside [1, n_samples].
    if mlp_estimator.batch_size < 1 or mlp_estimator.batch_size > n_samples:
        warnings.warn("Got `batch_size` less than 1 or larger than "