    def zero_neuron(self, input_dataset):
        # Build the root ("zero") neuron: its single weight map is the mean
        # of the whole dataset, and its quantization error becomes the
        # baseline that later growth decisions are measured against.
        input_dimension = input_dataset.shape[1]
        zero_neuron = Neuron(
            [self.__calc_input_mean(input_dataset).reshape(1, 1, input_dimension)],
            (0, 0),
            None,
            None,
            self.__growing_metric
        )
        zero_neuron.input_dataset = input_dataset

        self.zero_quantization_error = zero_neuron.compute_quantization_error()

        return zero_neuron
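
A minimal usage sketch, assuming builder is an instance of the class that defines zero_neuron above (Neuron and the private helpers come from the surrounding project):

import numpy as np

data = np.random.rand(100, 3)             # hypothetical dataset: 100 samples, 3 features
root = builder.zero_neuron(data)          # mean-initialized root neuron at position (0, 0)
print(builder.zero_quantization_error)    # baseline error for later growth checks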
Example #2
    def __init__(self, layers: List[int]):
        """
        :param layers: list of length L giving the neuron count of each layer
        """
        super().__init__()
        prev_count = 1
        for k, count in enumerate(layers):
            layer = []
            min_weight = -math.sqrt(prev_count) / 2
            max_weight = -min_weight

            if k == 0:
                # Input layer. Biases = 0. Weights = 1.
                layer_weights = numpy.zeros((prev_count + 1, count))
                layer_weights[1:, :] = 1
            elif k == len(layers) - 1:
                # Output layer. Weights and biases (+1).
                layer_weights = numpy.zeros((prev_count + 1, count))
            else:
                # Hidden layers. Weights and biases (+1), drawn uniformly
                # from [min_weight, max_weight).
                layer_weights = (numpy.random.random((prev_count + 1, count))
                                 * (max_weight - min_weight) + min_weight)

            for i in range(count):
                neuron_weights = numpy.array([layer_weights[:, i]]).T
                layer.append(Neuron(neuron_weights))
            self.neurons.append(layer)
            prev_count = count
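
A short sketch of the shapes this initializer produces, reusing the LayeredNetwork([3, 2, 1]) call from Example #10:

net = LayeredNetwork([3, 2, 1])
# net.neurons[0]: 3 input neurons, each with weights of shape (2, 1)
#                 (one bias row plus one pass-through weight)
# net.neurons[1]: 2 hidden neurons, each with weights of shape (4, 1)
# net.neurons[2]: 1 output neuron with weights of shape (3, 1)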
Example #3
    def _create_neurons(self):
        # Excitatory LIF neurons with randomized input currents.
        for i in range(self.exc_count):
            neuron = Neuron('lif')
            neuron.set_current_randomly(0, TOTAL_TIME)
            self.neurons.append(neuron)
        # Inhibitory LIF neurons.
        for i in range(self.inh_count):
            neuron = Neuron('lif', 'inh')
            neuron.set_current_randomly(0, TOTAL_TIME)
            self.neurons.append(neuron)
Example #4
    def new_neuron(self, weights_map, position):
        assert self.zero_quantization_error is not None, "Zero quantization error has not been set yet"

        return Neuron(
            weights_map,
            position,
            self.zero_quantization_error,
            self.__tau_2,
            self.__growing_metric
        )
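
A hedged usage sketch: new_neuron can only be called after zero_neuron above has set the baseline error (the weight-map shape, grid position, and builder instance below are hypothetical):

child = builder.new_neuron(
    [np.random.rand(1, 1, 3)],  # one 1x1 weight map with 3 features (assumed)
    (1, 0),                     # grid position of the new unit
)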
Example #5
File: sgd.py Project: mnvx/neonka
def J_quadratic(neuron: Neuron, X, y):
    """
    Evaluates the quadratic cost function.
    Exactly as in the lecture, no tricks.

    neuron - a neuron whose forward_pass method predicts values on the sample X
    X - matrix of input activations (n, m)
    y - vector of correct answers (n, 1)

    Returns the value of J (a scalar)
    """

    assert y.shape[1] == 1, 'Incorrect y shape'

    return 0.5 * np.mean((neuron.forward_pass(X) - y) ** 2)
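
A minimal sketch, assuming the Neuron(w, ...) constructor and the sigmoid helpers used in Example #7:

import numpy as np

X = np.hstack((np.ones((4, 1)), np.random.rand(4, 2)))  # bias column + 2 features
y = np.array([[0.0], [1.0], [1.0], [0.0]])
w = np.random.random((X.shape[1], 1))
neuron = Neuron(w, activation_function=sigmoid,
                activation_function_derivative=sigmoid_derivative)
print(J_quadratic(neuron, X, y))  # a single non-negative scalar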
Example #6
import numpy as np
import matplotlib.pyplot as plt

from neuron.delayfbneuron import DelayFBNeuron
#  from neuron.inputneuron import InputNeuron
from neuron.nandfbneuron import NANDFBNeuron
from neuron.neuron import Neuron
#  from neuron.outputneuron import OutputNeuron
from neuron.xorfbneuron import XORFBNeuron
from neuron.delayfbneuron2 import DelayFBNeuron2

########################################################################

np.random.seed(100)
nn = 4
tnls = [Neuron(nn), XORFBNeuron(nn), DelayFBNeuron(nn), NANDFBNeuron(nn)]
print(repr(tnls[2]))
nN = len(tnls)
N = 200
p = 0.5
nS = 2
t = np.arange(0, N, 1)
s = np.random.binomial(1, p, (N, nS))
tnout = np.zeros((nN, N), dtype=int)
for i in range(N):
    for k in range(len(tnls)):
        tnls[k] + s[i]  # '+' appears overloaded to feed the input and update the neuron's state
        tnout[k, i] = int(tnls[k].state)

fig0, axarr = plt.subplots(nS + nN, 1)
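
The original snippet is cut off at the subplots call; a plausible continuation that plots the input bits and each neuron's state trace (purely illustrative, standard matplotlib only):

for j in range(nS):
    axarr[j].step(t, s[:, j], where='post')
    axarr[j].set_ylabel('s[{}]'.format(j))
for k in range(nN):
    axarr[nS + k].step(t, tnout[k], where='post')
    axarr[nS + k].set_ylabel(type(tnls[k]).__name__)
plt.show()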
Example #7
weights = np.array(  # variable name assumed from the get_error call below
    [
        [1, 2, 3],
        [4, 5, 6],
    ]
)

print(back_propagation.get_error(deltas, sums, weights))



# Prepare the data

X = data[:, :-1]
y = data[:, -1]

X = np.hstack((np.ones((len(y), 1)), X))
y = y.reshape((len(y), 1))  # mind this nasty but important line: y must be a column vector


# Create a neuron

w = np.random.random((X.shape[1], 1))
neuron = Neuron(w, activation_function=sigmoid, activation_function_derivative=sigmoid_derivative)

# Compute an example
num_grad = compute_grad_numerically(neuron, X, y, J=J_quadratic)
an_grad = compute_grad_analytically(neuron, X, y, J_prime=J_quadratic_derivative)

print("Численный градиент: \n", num_grad)
print("Аналитический градиент: \n", an_grad)

Example #9
class Network:
    def __init__(self,
                 activation,
                 sample_image,
                 layer_configuration,
                 should_pool=True):

        self.__prev_neuron = Neuron(sample_image, activation, 'Ly1')
        self.__prev_neuron.set_should_pooling(should_pool)
        self.__prev_neuron.activate()
        self.__prev_neuron.plot_maps(0)
        self.__network = [self.__prev_neuron]

        size_of_config = len(layer_configuration)
        for i in range(size_of_config):
            neuron = Neuron(
                self.__prev_neuron.get_map(), activation, 'Ly{}'.format(i + 2),
                general.get_random_filter(
                    layer_configuration[i],
                    self.__prev_neuron.get_map().shape[-1]))
            neuron.set_should_pooling(should_pool)
            neuron.activate()
            neuron.plot_maps(i)
            self.__network.append(neuron)

            self.__prev_neuron = neuron

        tacos = self.__prev_neuron.get_map()
        neuron = fc_layer(tacos)  # fully connected layer

        neuron.set_should_pooling(False)  # no down-sampling on the output layer
        neuron.activate()
        self.__network.append(neuron)

    def train(self, input_data, cur_epoch, batch_size):
        error_sum = 0

        for i in range(len(input_data[1])):

            has_a_car = (i + cur_epoch) % 2
            data = input_data[has_a_car]['{}'.format(i)]

            for j in range(len(self.__network) - 1):
                if j == 0:
                    # data -= numpy.mean(data, axis=0)
                    self.__network[j].set_image(data)
                else:
                    self.__network[j].set_image(self.__network[j - 1].get_map())

                self.__network[j].activate()

                self.__network[j].plot_maps(i)

            classification = self.__network[-1].get_map()

            correct_answer = create_indacation(has_a_car)
            guess = create_indacation(classification)

            print("\n========================================\n"
                  "the correct answer for this image is: {}"
                  "\nhowever the network guessed: {}"
                  "\nleading to an error of {}"
                  "\n========================================\n".format(
                      correct_answer, guess,
                      .5 * math.pow((has_a_car - classification), 2)))

            if i == batch_size:
                avg_error = error_sum / batch_size

                print("\n==============================\n"
                      "YELLING AT THE TOP OF MY LUNGS"
                      "\n{}\n".format(avg_error))

            else:
                error_sum += .5 * math.pow((has_a_car - classification), 2)

    def test(self):  # TODO
        pass
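
A hedged construction sketch, assuming a ReLU-style activation and a random grayscale sample image; the shapes and layer configuration are hypothetical (fc_layer, general, and Neuron come from the surrounding project):

import numpy

net = Network(
    activation=lambda x: numpy.maximum(x, 0),   # assumed activation signature
    sample_image=numpy.random.rand(32, 32, 1),  # hypothetical input shape
    layer_configuration=[4, 8],                 # filters per hidden layer (assumed)
    should_pool=True,
)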
Example #10
import numpy

from neuron.neuron import Neuron
# LayeredNetwork is assumed to come from the surrounding project
# (its __init__ is shown in Example #2).


def activation(x):
    # ReLU.
    return max(x, 0)


def activation_derivative(x):
    # ReLU derivative (subgradient at 0 taken as 0).
    return 1 if x > 0 else 0


net = LayeredNetwork([3, 2, 1])
# print(net)

# Custom weights
net.set_neuron(0, 0, Neuron(numpy.array([[0, 1]]).T))
net.set_neuron(0, 1, Neuron(numpy.array([[0, 1]]).T))
net.set_neuron(0, 2, Neuron(numpy.array([[0, 1]]).T))

net.set_neuron(
    1, 0,
    Neuron(
        numpy.array([[0, 0.7, 0.2, 0.7]]).T,
        activation_function=activation,
        activation_function_derivative=activation_derivative,
    ))
net.set_neuron(1, 1, Neuron(numpy.array([[0, 0.8, 0.3, 0.6]]).T))

net.set_neuron(2, 0, Neuron(numpy.array([[0, 0.2, 0.4]]).T))
# print(net)