# Example #1
# 0
def main():
    """Train an MLP to approximate a quadratic function and plot the fit.

    Fits f(x) = x^2 - 10x + 21 on samples from [1, 9], then plots the
    true curve (blue) against the network's prediction (red) over the
    wider interval [-1, 11] to visualize generalization.
    """
    import math
    import random

    from pylab import plot, show

    def func(x):
        # Quadratic with roots at x = 3 and x = 7.
        return math.pow(x, 2.0) - 10.0 * x + 21

    # Training samples: the interval [1, 9] divided into 40 parts.
    train_set = tuple(
        ([i], [func(i)]) for i in util.divide_arange(1.0, 9.0, 40))

    # 1 input; layers of 10, 30 and 1 units; sigmoid hidden activation,
    # 'custom' output activation.
    mlp = MLP(1, [10, 30, 1], ACTIVATIONS_FUNCTIONS['sigmoid'],
              ACTIVATIONS_FUNCTIONS['custom'])

    # Small random initial weights.
    mlp.randomise_weights(lambda: random.uniform(-0.05, 0.05))

    # NOTE(review): second argument presumably the learning rate — confirm
    # against Supervisor's definition.
    sup = Supervisor(mlp, 0.001)

    # Train on the set until the error threshold (0.0005) or 10000 epochs.
    sup.train_set(train_set, 0.0005, 10000)

    # Validation points on a wider interval than training.
    validation = tuple(
        ([x], [func(x)]) for x in util.divide_arange(-1.0, 11.0, 200))

    # Blue: true function; red: network prediction.
    plot([i[0][0] for i in validation], [i[1][0] for i in validation], 'b',
         [i[0][0] for i in validation],
         [mlp.predict(i[0]) for i in validation], 'r')
    show()
# Example #2
# 0
def main():
    """Train an MLP to approximate a cubic function and plot the fit.

    Based on the MathWorks "improve generalization and avoid overfitting"
    example:
    https://www.mathworks.com/help/deeplearning/ug/improve-neural-network-generalization-and-avoid-overfitting.html

    Fits f(x) = 2x^3 - x^2 + 10x - 4 on samples from [-3, 3], then plots
    the true curve (blue) against the network's prediction (red) over the
    wider interval [-4, 4].
    """
    # All imports grouped at the top of the function (matches the style of
    # the other example in this file).
    import math
    import random

    from pylab import plot, show

    def func(x):
        # Target cubic: 2x^3 - x^2 + 10x - 4.
        return 2*math.pow(x, 3) - math.pow(x, 2) + 10*x - 4

    # Training samples: the interval [-3, 3] divided into 40 parts.
    train_set = tuple(
        ([i], [func(i)])
        for i in util.divide_arange(-3.0, 3.0, 40)
    )

    # 1 input; layers of 10, 30 and 1 units; sigmoid hidden activation,
    # linear output activation.
    mlp = MLP(1, [10, 30, 1],
              ACTIVATIONS_FUNCTIONS['sigmoid'],
              ACTIVATIONS_FUNCTIONS['linear'])

    # Random initial weights in [-1, 1].
    mlp.randomise_weights(lambda: random.uniform(-1.0, 1.0))

    # NOTE(review): second argument presumably the learning rate — confirm
    # against Supervisor's definition.
    sup = Supervisor(mlp, 0.01)

    # Train on the set until the error threshold (0.005) or 3000 epochs.
    sup.train_set(train_set, 0.005, 3000)

    # Validation points on a wider interval than training.
    validation = tuple(
        ([x], [func(x)])
        for x in util.divide_arange(-4.0, 4.0, 200)
    )

    # Blue: true function; red: network prediction.
    plot(
        [i[0][0] for i in validation], [i[1][0] for i in validation], 'b',
        [i[0][0] for i in validation], [mlp.predict(i[0]) for i in validation], 'r'
    )
    show()