import time

import numpy as np

from joelnet.train import train
from joelnet.nn import NeuralNet
from joelnet.layers import Linear, reLu
from joelnet.optim import SGD, Adam
from joelnet.data import BatchIterator
from joelnet.loss import MSE

inputs = []
for i in range(20):
    inputs.append([i])
inputs = np.array(inputs)

# targets = np.array([
#     [1],
#     [4],
#     [9],
#     [16],
#     [25]
# ])
targets = inputs**2


net = NeuralNet([
    Linear(input_size=1, output_size=2,
           weights=np.array([[1.0, 2.0]]),
           biases=np.array([0.0, 0.0])),
    reLu(),
    Linear(input_size=2, output_size=1,
           weights=np.array([[3.0], [4.0]]),
           biases=np.array([0.0])),
    reLu()
])



n_epochs = 1000

#loss_list = train(net, inputs,targets, optimizer = Adam(lr = 1e-2, gamma1 = 0.3, gamma2 = 0.3),iterator = BatchIterator(batch_size = 5), num_epochs = 1000)
start_time = time.time()
loss_list = train(net, inputs, targets,
                  loss=MSE(),
                  optimizer=SGD(1e-5),
                  iterator=BatchIterator(batch_size=5),
                  num_epochs=n_epochs,
                  eps=2000)
end_time = time.time()
print(f'Training time: {end_time - start_time}s')
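
# train() returns the per-epoch loss history, so convergence can be
# inspected with a quick plot (a sketch; assumes matplotlib, which the
# last example in this collection already imports):
import matplotlib.pyplot as plt

plt.plot(loss_list)
plt.xlabel('epoch')
plt.ylabel('MSE loss')
plt.show()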
Example #2
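# Hypothetical setup (not part of the original snippet): the inputs and
# targets this example trains on are never shown. Given the 30-feature
# input layer and the sigmoid output, a 30-feature binary-label dataset
# such as sklearn's breast cancer set would fit. Imports mirror the
# sibling snippets in this collection.
import numpy as np
from sklearn.datasets import load_breast_cancer
from joelnet.train import train
from joelnet.nn import NeuralNet
from joelnet.layers import Linear, Tanh, Sigmoid
from joelnet.optim import Adam
from joelnet.data import BatchIterator

data = load_breast_cancer()
inputs = np.array(data.data)            # shape (569, 30)
targets = data.target.reshape(-1, 1)    # binary labels as a column vector
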
# net = NeuralNet([
#     Linear(input_size=30, output_size=16),
#     reLu(),
#     Linear(input_size=16, output_size=24),
#     reLu(),
#     Linear(input_size=24, output_size=20),
#     reLu(),
#     Linear(input_size=20, output_size=24),
#     reLu(),
#     Linear(input_size=24, output_size=1),
#     Sigmoid(),
#     Linear(input_size=1, output_size=1)
# ])
net = NeuralNet([
    Linear(input_size=30, output_size=24),
    Tanh(),
    Linear(input_size=24, output_size=30),
    Tanh(),
    Linear(input_size=30, output_size=35),
    Tanh(),
    Linear(input_size=35, output_size=1),
    Sigmoid()
])

n_epochs = 200
loss_list = train(net,
                  inputs,
                  targets,
                  optimizer=Adam(lr=1e-2, gamma1=0.3, gamma2=0.4),
                  iterator=BatchIterator(128),
                  num_epochs=n_epochs)
Example #3
from typing import List

import numpy as np

from joelnet.train import train
from joelnet.nn import NeuralNet
from joelnet.layers import Linear, Tanh
from joelnet.optim import SGD


def fizz_buzz_encode(x: int) -> List[int]:
    """
    One-hot encode x as [number, fizz, buzz, fizzbuzz].
    """
    if x % 15 == 0:
        return [0, 0, 0, 1]
    elif x % 5 == 0:
        return [0, 0, 1, 0]
    elif x % 3 == 0:
        return [0, 1, 0, 0]
    else:
        return [1, 0, 0, 0]
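
# Sanity check: the four classes line up with the labels printed at the
# bottom of this example.
print(fizz_buzz_encode(3))   # [0, 1, 0, 0] -> "fizz"
print(fizz_buzz_encode(5))   # [0, 0, 1, 0] -> "buzz"
print(fizz_buzz_encode(15))  # [0, 0, 0, 1] -> "fizzbuzz"
print(fizz_buzz_encode(7))   # [1, 0, 0, 0] -> the number itself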


def binary_encode(x: int) -> List[int]:
    """
    10 digit binary encoding of x
    """
    return [x >> i & 1 for i in range(10)]
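
# The encoding is little-endian (least significant bit first), e.g.:
print(binary_encode(3))    # [1, 1, 0, 0, 0, 0, 0, 0, 0, 0]
print(binary_encode(512))  # [0, 0, 0, 0, 0, 0, 0, 0, 0, 1]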


inputs = np.array([binary_encode(x) for x in range(101, 1024)])

targets = np.array([fizz_buzz_encode(x) for x in range(101, 1024)])

net = NeuralNet([
    Linear(input_size=10, output_size=50),
    Tanh(),
    Linear(input_size=50, output_size=4)
])

train(net, inputs, targets, num_epochs=5000, optimizer=SGD(lr=0.001))

for x in range(1, 101):
    predicted = net.forward(binary_encode(x))
    predicted_idx = np.argmax(predicted)
    actual_idx = np.argmax(fizz_buzz_encode(x))
    labels = [str(x), "fizz", "buzz", "fizzbuzz"]
    print(x, labels[predicted_idx], labels[actual_idx])
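
# A rough score instead of an eyeball check (a sketch using only the
# helpers above): accuracy over the held-out range 1..100.
correct = sum(
    int(np.argmax(net.forward(binary_encode(x))) ==
        np.argmax(fizz_buzz_encode(x)))
    for x in range(1, 101)
)
print(f'accuracy: {correct}/100')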
Example #4
"""
The canonical example of a function that can't
be learned with a simple linear model is XOR
"""

import numpy as np

from joelnet.train import train
from joelnet.nn import NeuralNet
from joelnet.layers import Linear, Tanh

inputs = np.array([[0, 0], [1, 0], [0, 1], [1, 1]])

targets = np.array([[1, 0], [0, 1], [0, 1], [1, 0]])

net = NeuralNet([
    Linear(input_size=2, output_size=2),
    Tanh(),
    Linear(input_size=2, output_size=2)
])

train(net, inputs, targets)

for x, y in zip(inputs, targets):
    predicted = net.forward(x)
    print(x, predicted, y)
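
# To see the docstring's claim in action, swap in a purely linear model
# (a sketch using the same API): without the Tanh in between, the two
# Linear layers compose into a single affine map, which cannot separate
# XOR, so the predictions stay near the class average.
linear_net = NeuralNet([
    Linear(input_size=2, output_size=2)
])
train(linear_net, inputs, targets)

for x, y in zip(inputs, targets):
    print(x, linear_net.forward(x), y)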
Example #5
import sys
import time

import numpy as np

from joelnet.train import train
from joelnet.nn import NeuralNet
from joelnet.layers import Linear, reLu
from joelnet.optim import SGD
from joelnet.data import BatchIterator
from joelnet.loss import MSE

inputs = []
n_epochs = 10000
eps = 5
for j in range(5):
    inputs.append([j])
inputs = np.array(inputs)
targets = inputs**2

np.random.seed(20)

net = NeuralNet([
    Linear(input_size=1,
           output_size=2,
           weights=np.random.randn(1, 2),
           biases=np.random.randn(2)),
    reLu(),
    Linear(input_size=2,
           output_size=2,
           weights=np.random.randn(2, 2),
           biases=np.random.randn(2)),
    reLu(),
    Linear(input_size=2,
           output_size=1,
           weights=np.random.randn(2, 1),
           biases=np.random.randn(1))
])

start_time = time.time()
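# Plausible continuation (the scraped snippet breaks off at a bare
# "try:"): a training run in the style of the first example, wrapped so
# a long run can be interrupted; n_epochs and eps are defined above.
try:
    loss_list = train(net, inputs, targets,
                      loss=MSE(),
                      optimizer=SGD(1e-5),
                      iterator=BatchIterator(batch_size=5),
                      num_epochs=n_epochs,
                      eps=eps)
except KeyboardInterrupt:
    pass
end_time = time.time()
print(f'Training time: {end_time - start_time}s')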
Example #6

# The start of this snippet is truncated in the source: the surviving
# tail is the end of a scikit-learn train/test split, reconstructed
# below. X and y are assumed to be defined upstream (a 30-feature
# binary-label dataset, judging from the network that follows), and the
# joelnet imports are as in the sibling snippets.
from sklearn.model_selection import train_test_split

X_train, X_test, y_train, y_test = train_test_split(X,
                                                    y,
                                                    test_size=0.3,
                                                    random_state=42)

X_train = np.array(X_train)
X_test = np.array(X_test)

y_train = np.array(y_train)
y_test = np.array(y_test)

inputs = X_train
targets = np.array(y_train)
np.random.seed(2)
net = NeuralNet([
    Linear(input_size=30,
           output_size=24,
           weights=np.random.randn(30, 24),
           biases=np.random.randn(24)),
    Tanh(),
    Linear(input_size=24,
           output_size=30,
           weights=np.random.randn(24, 30),
           biases=np.random.randn(30)),
    Tanh(),
    Linear(input_size=30,
           output_size=35,
           weights=np.random.randn(30, 35),
           biases=np.random.randn(35)),
    Tanh(),
    Linear(input_size=35,
           output_size=1,
           weights=np.random.randn(35, 1),
           biases=np.random.randn(1))
])
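
# Plausible continuation (the source ends mid-definition above): a
# training call mirroring Example #2's hyperparameters, with Adam and
# BatchIterator imported as in the sibling snippets.
loss_list = train(net,
                  inputs,
                  targets,
                  optimizer=Adam(lr=1e-2, gamma1=0.3, gamma2=0.4),
                  iterator=BatchIterator(128),
                  num_epochs=200)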
Example #7
    """
    return [x >> i & 1 for i in range(num_bits)]
NUM_ENCODE_BITS = 10
NUM_EPOCHS = 10000
inputs = np.array([
    binary_encode(x, NUM_ENCODE_BITS)
    for x in range(101, 1024)
])

targets = np.array([
    fizz_buzz_encode(x)
    for x in range(101, 1024)
])

net = NeuralNet([
    Linear(input_size=NUM_ENCODE_BITS, output_size=50),
    Tanh(),
    Linear(input_size=50, output_size=4)
])

train(net=net,
      inputs=inputs,
      targets=targets,
      num_epochs=NUM_EPOCHS,
      optimizer=SGD(lr=0.001))

for x in range(1, 101):
    predicted = net.forward(inputs=binary_encode(x, NUM_ENCODE_BITS))
    predicted_idx = np.argmax(predicted) # largest value is predicted class
    actual_idx = np.argmax(fizz_buzz_encode(x))
    labels = [str(x), 'fizz', 'buzz', 'fizzbuzz']
    print(x, labels[predicted_idx], labels[actual_idx])
Example #8
import numpy as np
import matplotlib.pyplot as plt
from joelnet.train import train
from joelnet.nn import NeuralNet
from joelnet.layers import Linear, Tanh, Sigmoid, reLu
from joelnet.optim import Optimizer, SGD, Adam
from joelnet.data import BatchIterator
from joelnet.loss import Log_loss

inputs = np.array([[0, 0], [1, 0], [0, 1], [1, 1]])

targets = np.array([[0], [1], [1], [0]])

net = NeuralNet([
    Linear(input_size=2, output_size=4),
    Sigmoid(),
    Linear(input_size=4, output_size=4),
    Sigmoid(),
    Linear(input_size=4, output_size=1),
    Sigmoid()
])

n_epochs = 10000
loss_list = train(net,
                  inputs,
                  targets,
                  loss=Log_loss(),
                  optimizer=SGD(lr=1e-5),
                  iterator=BatchIterator(4),
                  num_epochs=n_epochs)
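
# matplotlib is imported above but unused in the surviving snippet; a
# plausible continuation plots the training curve:
plt.plot(loss_list)
plt.xlabel('epoch')
plt.ylabel('log loss')
plt.show()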