Example no. 1
    def test_get(self):
        nn1 = core.FeedForward(momentum=0.1, learn_rate=0.1)
        nn1 += layers.Tanh(2, 2)
        nn1 += layers.Linear(2, 2)

        nn2 = core.FeedForward(momentum=0.1, learn_rate=0.1)
        nn2 += layers.Tanh(2, 2)
        nn2 += layers.Linear(2, 2)

        ensemble = core.Ensemble(nn1, nn2)
        ensemble.fit([0, 1], [2, 1])

        stack = numpy.vstack((nn1.get([0, 0]), nn2.get([0, 0])))
        self.assertEqual(round(ensemble.get([0, 0])[0] * 1000),
                         round((stack.sum(axis=0) / len(stack))[0] * 1000))
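The assertion verifies, to three decimal places, that the ensemble's output for [0, 0] equals the mean of its member networks' outputs (compared on the first component): stack.sum(axis=0) / len(stack) is exactly the element-wise average of nn1 and nn2.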
Example no. 2
    def test_dropout_after_training(self):
        n = core.FeedForward(momentum=0.1, learn_rate=0.1)
        drop = layers.Dropout(layers.Tanh(2, 2), percentage=0.5)
        n += layers.Linear(2, 2)
        n += drop
        n += layers.Linear(2, 1)

        s = [
            ([0, 0], [0]),
            ([0, 1], [1]),
            ([1, 0], [1]),
            ([1, 1], [0]),
        ]

        n.fit(*s[1])
        n.fit(*s[0])
        n.fit(*s[2])
        n.fit(*s[0])
        n.fit(*s[1])

        # with percentage=0.5, exactly half of the dropout rows should be zeroed
        zeros = 0
        for row in drop.y:
            if row[0] == 0:
                zeros += 1
        self.assertEqual(zeros, len(drop.w) // 2)
Example no. 3
    def test_layers_addition(self):
        v = core.FeedForward()
        v += layers.Linear(2, 3)
        v += layers.Tanh(3, 2)
        v += layers.Linear(2, 1)
        self.assertEqual(len(v.layers), 3)
        self.assertEqual(len(v.layers[1].v), 2)
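The first assertion confirms that all three layers were registered via +=; the second inspects layers[1], the Tanh(3, 2) layer, whose v attribute (presumably the layer's output vector) has length 2, matching that layer's output size.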
Example no. 4
    def test_cv(self):
        import core.estimators
        n = core.FeedForward(momentum=0.1, learn_rate=0.1)
        n += layers.Linear(2, 2)
        n += layers.Tanh(2, 1)
        n += layers.Linear(1, 1)  # final layer must emit one value to match the one-element targets

        s = [
            ([0, 0], [0]),
            ([0, 1], [1]),
            ([1, 0], [1]),
            ([1, 1], [0]),
        ]
        error = core.estimators.cv(n, s)
        self.assertIsInstance(error, float)
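cv evidently cross-validates the network over the sample set s and reduces the result to a single float error score; the test only asserts that return type.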
Example no. 5
    def test_by_xor(self):
        error = 0.1
        n = core.FeedForward(
            momentum=0.1,
            learn_rate=0.1)  # .create([2, 2, 1], default=layers.Tanh)

        n += layers.Tanh(2, 2)
        n += layers.Linear(2, 1)

        s = [
            ([0, 0], [0]),
            ([0, 1], [1]),
            ([1, 0], [1]),
            ([1, 1], [0]),
        ]

        for i in range(10_000):
            r = random.randint(0, len(s) - 1)
            n.fit(*s[r])

        # the trained network should reproduce XOR within the declared error bound
        for v in s:
            res = n.get(v[0])
            self.assertTrue(abs(v[1][0] - res[0]) < error)
Example no. 6
    def test_xor_by_train(self):
        error = 0.1
        n = core.FeedForward(momentum=0.1, learn_rate=0.1)

        n += layers.Tanh(2, 2)
        n += layers.Linear(2, 1)

        s = [
            ([0, 0], [0]),
            ([0, 1], [1]),
            ([1, 0], [1]),
            ([1, 1], [0]),
        ]
        n.train(s, 10_000)

        for v in s:
            res = n.get(v[0])
            self.assertTrue(abs(v[1][0] - res[0]) < error)

        for v in s:
            print(n.get(v[0]), end='\n\n')
Example no. 7
    def test_tanh(self):
        l = layers.Tanh(3, 3)
        self.assertEqual(l.a(0), 0)    # tanh(0) == 0
        self.assertEqual(l.der(0), 1)  # tanh'(0) == 1 - tanh(0)**2 == 1
Example no. 8
import matplotlib.pyplot as plot
import random

import layers
from core import FeedForward

nn = FeedForward(momentum=0.1, learn_rate=0.1,
                 weight_decay=0.2)  # .create([2, 2, 1], default=layers.Tanh)

nn += layers.Tanh(2, 2)
nn += layers.Linear(2, 1)

s = [
    ([0, 0], [0]),
    ([0, 1], [1]),
    ([1, 0], [1]),
    ([1, 1], [0]),
]

E = []

for i in range(10_000):
    r = random.randint(0, len(s) - 1)
    nn.fit(*s[r])
    E.append(nn.error)

print(nn)

for v in s:
    print(nn.get(v[0]), end='\n\n')

# plot the error curve recorded during training
plot.plot(E)
plot.show()
Example no. 9
    def __init__(self, *args):
        self.layer = layers.Tanh()
        self.inputs = args
Example no. 10
import numpy

import layers
from core import FeedForward, separate_data
from core.estimators import cv


def f(x):
    return 0.5 * numpy.sin(numpy.exp(x)) - numpy.cos(numpy.exp(-x))


nn = FeedForward(momentum=0.2, learn_rate=0.05,
                 weight_decay=0.2)  # .create([2, 2, 1], default=layers.Tanh)

nn += layers.Tanh(1, 10)
nn += layers.Tanh(10, 10)
nn += layers.Tanh(10, 10)
nn += layers.Tanh(10, 10)

nn += layers.Linear(10, 1)

data = [([x], [f(x)]) for x in numpy.linspace(-2.2, 2.5, 150)]
ts, vs = separate_data(data, 0.15)

# recompute x and y over the full input range for plotting
x = numpy.linspace(-2.2, 2.5, 150)
y = f(x)

error = []
v_error = []
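The snippet ends before the loop that would fill error and v_error. A minimal sketch of such a loop, assuming the fit/get/error API used in the other examples (the validation metric here is an illustrative mean absolute error, not taken from the original script):

import random

for i in range(10_000):
    x, t = random.choice(ts)   # random training sample
    nn.fit(x, t)               # one online-learning step
    error.append(nn.error)     # training error tracked by the network

    # assumed validation metric: mean absolute error on the held-out set
    v_error.append(sum(abs(t[0] - nn.get(x)[0]) for x, t in vs) / len(vs))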
Example no. 11
import numpy

import layers
from core import FeedForward, separate_data
from core.estimators import cv
# (from_csv is a project helper whose import is not shown in this snippet)


def normal(arr):
    s = numpy.sum(numpy.abs(arr))
    return numpy.round(numpy.abs(arr) / s, decimals=2)


training, validation = separate_data(
    from_csv("D:\\DELETE\\Дипломмо\\output.csv"), 0.15)

# noise(training, from_range=(0, 2), axis=0)
# noise(training, from_range=(-0.05, 0.05), axis=1)

ff1 = FeedForward(learn_rate=0.05, momentum=0.2, weight_decay=0.5)
ff1 += layers.Tanh(6, 23)
ff1 += layers.Dropout(layers.Tanh(23, 28), percentage=0.3)
ff1 += layers.Dropout(layers.Tanh(28, 28), percentage=0.3)
ff1 += layers.Dropout(layers.Tanh(28, 28), percentage=0.3)
ff1 += layers.Dropout(layers.Tanh(28, 28), percentage=0.3)
ff1 += layers.Linear(28, 8)

ff2 = FeedForward(learn_rate=0.07, momentum=0.2, weight_decay=0.23)
ff2 += layers.Tanh(6, 23)
ff2 += layers.Dropout(layers.Tanh(23, 28), percentage=0.3)
ff2 += layers.Dropout(layers.Tanh(28, 28), percentage=0.3)
ff2 += layers.Dropout(layers.Tanh(28, 28), percentage=0.3)
ff2 += layers.Dropout(layers.Tanh(28, 28), percentage=0.3)
ff2 += layers.Linear(28, 8)

ff3 = FeedForward(learn_rate=0.04, momentum=0.6, weight_decay=0.4)
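The snippet breaks off after constructing ff3. A plausible continuation, mirroring the ff1/ff2 stacks above and the Ensemble usage from Example no. 1 (the layer sizes and the Ensemble import are assumptions, not from the original):

from core import Ensemble  # assumed import, matching core.Ensemble in Example no. 1

ff3 += layers.Tanh(6, 23)
ff3 += layers.Dropout(layers.Tanh(23, 28), percentage=0.3)
ff3 += layers.Dropout(layers.Tanh(28, 28), percentage=0.3)
ff3 += layers.Dropout(layers.Tanh(28, 28), percentage=0.3)
ff3 += layers.Dropout(layers.Tanh(28, 28), percentage=0.3)
ff3 += layers.Linear(28, 8)

ensemble = Ensemble(ff1, ff2, ff3)  # combine the three networks, as in Example no. 1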