Example #1
    def _connect_nodes(self, from_nodes, to_nodes, weight=None, random=False):
        """
        Connects the nodes in from_nodes to the nodes in to_nodes.

        Args:
            from_nodes: List of source nodes.
            to_nodes: List of destination nodes.
            weight: Weight for each created connection.
            random: If True, each to_node is connected to a random sample of from_nodes.

        Notes:
            If weight is None, each weight is chosen at random between -1 and 1.
        """

        for to_node in to_nodes:
            # If random, create random sample of connection partners
            if random:
                max_connections = min(len(from_nodes), self.max_connections)
                random_connections = randint(1, max_connections)
                from_nodes_sample = sample(from_nodes, random_connections)
            else:
                from_nodes_sample = from_nodes
            # Connect to_node to each node in from_nodes_sample.
            for from_node in from_nodes_sample:
                Connection(from_node, to_node,
                           self._get_connection_weight(weight))
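Every snippet in this listing calls Connection(from_node, to_node, weight) and discards the return value, which only works if the constructor registers itself on the receiving neuron. The sketch below shows minimal stand-in Neuron and Connection classes under that assumption; only the attribute names (semantics, input_connections, weight) are taken from the examples, everything else is illustrative.

# Minimal stand-ins assumed by the snippets in this listing (illustrative only).
class Neuron:
    def __init__(self, semantics, input_connections, activation_function,
                 cache_weighted_input=False):
        self.semantics = semantics                    # cached output values per sample
        self.input_connections = input_connections    # incoming Connection objects
        self.activation_function = activation_function
        self.cache_weighted_input = cache_weighted_input


class Connection:
    def __init__(self, from_node, to_node, weight):
        self.from_node = from_node
        self.to_node = to_node
        self.weight = weight
        # Register on the receiving neuron so callers can discard the instance.
        to_node.input_connections.append(self)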
Example #2
    def _connect_flat_hidden(self, from_nodes, to_nodes):
        """Fully connects from_nodes to to_nodes with random weights in [-2, 2],
        replacing any existing input connections."""
        for to_node in to_nodes:
            # TODO: this definitely needs to be thrown out, it is very, very bad!!!! You can't do it like this.
            to_node.input_connections = []
            for from_node in from_nodes:
                Connection(from_node, to_node, uniform(-2, 2))
Example #3
    def mutation_lbfgs(self, X, y, nn, new_neurons, random_state=None):
        """Fits output weights for newly added hidden neurons with L-BFGS."""
        n_samples = y.shape[0]
        n_new_neurons = len(new_neurons)
        hidden_semantics = zeros((n_samples, n_new_neurons))
        for i, hidden_neuron in enumerate(new_neurons):
            hidden_semantics[:, i] = hidden_neuron.semantics

        layer_units = [n_new_neurons, y.shape[1]]
        activations = [hidden_semantics]
        activations.extend(
            empty((n_samples, n_fan_out)) for n_fan_out in layer_units[1:])
        deltas = [empty_like(a_layer) for a_layer in activations]
        coef_grads = [
            empty((n_fan_in_, n_fan_out_))
            for n_fan_in_, n_fan_out_ in zip(layer_units[:-1], layer_units[1:])
        ]
        intercept_grads = [empty(n_fan_out_) for n_fan_out_ in layer_units[1:]]

        solver = LBFGS()
        """ zero-weight initialization for new neurons """
        coef_init = zeros((layer_units[0], layer_units[1]))

        intercept_init = zeros(layer_units[1])
        for output_index, output_neuron in enumerate(nn.output_layer):
            intercept_init[output_index] = output_neuron.input_connections[
                0].weight

        fixed_weighted_input = zeros((n_samples, layer_units[1]))
        for output_index, output_neuron in enumerate(nn.output_layer):
            previous_bias = output_neuron.input_connections[0].weight
            fixed_weighted_input[:, output_index] = (
                output_neuron.get_weighted_input() - previous_bias)
            output_neuron.set_previous_bias(previous_bias)

        coefs, intercepts = solver.fit(
            X,
            y,
            activations,
            deltas,
            coef_grads,
            intercept_grads,
            layer_units,
            random_state=random_state,
            coef_init=coef_init,
            intercept_init=intercept_init,
            fixed_weighted_input=fixed_weighted_input)

        coefs = coefs[-1]
        intercepts = intercepts[-1]
        for output_index, output_neuron in enumerate(nn.output_layer):
            for i in range(n_new_neurons):
                # print('coefs[%d, %d] = %.5f\n' % (i, output_index, coefs[i, output_index]))
                Connection(new_neurons[i], output_neuron, coefs[i,
                                                                output_index])

            # print('intercepts[%d] = %.5f\n' % (output_index, intercepts[output_index]))
            output_neuron.input_connections[0].weight = intercepts[
                output_index]
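The solver.fit call above delegates to the repository's own LBFGS wrapper, whose internals are not shown in this listing. As a rough stand-in, the sketch below fits the same quantities with scipy.optimize.minimize: coefficients from the new neurons' semantics to the outputs plus fresh intercepts, while the already-existing weighted input (minus the previous bias) is held fixed. The function fit_new_output_weights and everything inside it are hypothetical and only illustrate the optimization this step performs, not the project's actual solver.

# Illustrative stand-in for the LBFGS step above (not the project's solver).
import numpy as np
from scipy.optimize import minimize

def fit_new_output_weights(hidden_semantics, y, fixed_weighted_input,
                           intercept_init):
    n_samples, n_hidden = hidden_semantics.shape
    n_outputs = y.shape[1]
    n_coefs = n_hidden * n_outputs

    def loss_and_grad(params):
        coefs = params[:n_coefs].reshape(n_hidden, n_outputs)
        intercepts = params[n_coefs:]
        residual = fixed_weighted_input + hidden_semantics @ coefs + intercepts - y
        loss = 0.5 * np.sum(residual ** 2) / n_samples
        grad_coefs = hidden_semantics.T @ residual / n_samples
        grad_intercepts = residual.sum(axis=0) / n_samples
        return loss, np.concatenate([grad_coefs.ravel(), grad_intercepts])

    # Mirror the initialization above: zero coefficients, previous biases as intercepts.
    x0 = np.concatenate([np.zeros(n_coefs), intercept_init])
    result = minimize(loss_and_grad, x0, jac=True, method='L-BFGS-B')
    coefs = result.x[:n_coefs].reshape(n_hidden, n_outputs)
    intercepts = result.x[n_coefs:]
    return coefs, intercepts

In mutation_lbfgs the returned coefs become the weights of new Connection objects from each new neuron to each output neuron, and the intercepts overwrite each output neuron's bias connection weight.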
Example #4
def create_output_neuron(activation_function,
                         bias,
                         initial_bias_connection_weight=0.0):
    """Creates an output neuron with a bias connection of the given initial weight."""
    neuron = Neuron(np.array([]),
                    list(),
                    activation_function,
                    cache_weighted_input=True)
    Connection(bias, neuron, initial_bias_connection_weight)
    return neuron
def create_neuron(activation_function=None, bias=None):
    """Creates neuron with defined activation function and bias."""
    # If activation function not defined, choose activation function at random.
    if not activation_function:
        activation_function = choice(list(_ACTIVATION_FUNCTIONS.keys()))
    neuron = Neuron(array([]), list(), activation_function)
    # If is biased, connect to bias with random weight.
    if bias:
        Connection(bias, neuron, uniform(-1, 1))
    return neuron
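Assuming the stand-in Neuron and Connection classes sketched after Example #1, a bias neuron whose semantics are a vector of ones, and the module-level imports of the original source (array, uniform, choice, _ACTIVATION_FUNCTIONS), usage could look like the snippet below. Whether activation functions are passed as callables or as dictionary keys is not shown in this listing, so tanh is only a placeholder.

# Hypothetical usage of the factories above (all concrete values are assumptions).
from numpy import ones, tanh

bias = Neuron(ones(50), list(), None)              # bias neuron with constant-one semantics (assumed)
hidden_layer = [create_neuron(bias=bias) for _ in range(5)]
output_neuron = create_output_neuron(tanh, bias)   # bias connection starts at weight 0.0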
Example #6
    def _connect_cnn_nodes(self, from_nodes, to_nodes, full):
        """
        Connects the nodes in from_nodes to the nodes in to_nodes.

        Args:
            from_nodes: List of source nodes.
            to_nodes: List of destination nodes.
            full: If True, every to_node is connected to all from_nodes;
                otherwise each to_node is connected to a single randomly
                sampled from_node.

        Notes:
            Every connection is created with weight 1.
        """
        if full:
            for to_node in to_nodes:
                Connection(from_nodes, to_node, 1)
        else:
            for to_node in to_nodes:
                # Sample a single random connection partner for this to_node.
                from_node = sample(from_nodes, 1)
                Connection(from_node, to_node, 1)
Example #7
    def init_lbfgs(self, X, y, nn, random_state=None):
        """Fits output-layer weights on the last hidden layer's semantics with L-BFGS."""
        n_samples = y.shape[0]
        n_neurons = len(nn.hidden_layers[-1])
        hidden_semantics = zeros((n_samples, n_neurons))
        for i, hidden_neuron in enumerate(nn.hidden_layers[-1]):
            hidden_semantics[:, i] = hidden_neuron.semantics

        layer_units = [n_neurons, y.shape[1]]
        # The first activation is the last hidden layer's semantics rather
        # than X (the replaced line was: activations = [X]).
        activations = [hidden_semantics]
        activations.extend(
            empty((n_samples, n_fan_out)) for n_fan_out in layer_units[1:])
        deltas = [empty_like(a_layer) for a_layer in activations]
        coef_grads = [
            empty((n_fan_in_, n_fan_out_))
            for n_fan_in_, n_fan_out_ in zip(layer_units[:-1], layer_units[1:])
        ]
        intercept_grads = [empty(n_fan_out_) for n_fan_out_ in layer_units[1:]]

        solver = LBFGS()

        coef_init = zeros((layer_units[0], layer_units[1]))
        intercept_init = zeros(layer_units[1])
        coefs, intercepts = solver.fit(X,
                                       y,
                                       activations,
                                       deltas,
                                       coef_grads,
                                       intercept_grads,
                                       layer_units,
                                       random_state=random_state,
                                       coef_init=coef_init,
                                       intercept_init=intercept_init)

        coefs = coefs[-1]
        intercepts = intercepts[-1]
        hidden_neurons = nn.hidden_layers[-1]
        for output_index, output_neuron in enumerate(nn.output_layer):
            for i in range(n_neurons):
                # print('coefs[%d, %d] = %.5f\n' % (i, output_index, coefs[i, output_index]))
                Connection(hidden_neurons[i], output_neuron,
                           coefs[i, output_index])

            # print('intercepts[%d] = %.5f\n' % (output_index, intercepts[output_index]))
            output_neuron.input_connections[0].weight = intercepts[
                output_index]
Example #8
def _connect_nodes(from_nodes, to_nodes, weight=0):
    """Connects all from nodes with all to nodes with determined weight."""
    for to_node in to_nodes:
        for from_node in from_nodes:
            Connection(from_node, to_node, weight)
def create_output_neuron(activation_function, bias, initial_bias_connection_weight=0.0):
    """Creates an output neuron with a bias connection of the given initial weight."""
    neuron = Neuron(array([]), list(), activation_function)
    Connection(bias, neuron, initial_bias_connection_weight)
    return neuron