inputs.append([i])
inputs = np.array(inputs)

# targets = np.array([
#     [1],
#     [4],
#     [9],
#     [16],
#     [25]
# ])
targets = inputs**2


net = NeuralNet([
    Linear(input_size=1, output_size=2, weights=np.array([[1.0, 2.0]]), biases=np.array([0.0, 0.0])),
    reLu(),
    Linear(input_size=2, output_size=1, weights=np.array([[3.0], [4.0]]), biases=np.array([0.0])),
    reLu()
])



n_epochs = 1000

#loss_list = train(net, inputs,targets, optimizer = Adam(lr = 1e-2, gamma1 = 0.3, gamma2 = 0.3),iterator = BatchIterator(batch_size = 5), num_epochs = 1000)
start_time = time.time()
loss_list = train(net, inputs, targets, loss=MSE(), optimizer=SGD(1e-5), iterator=BatchIterator(batch_size=5), num_epochs=n_epochs, eps=2000)
end_time = time.time()
print(f'Training time: {end_time - start_time}s')
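
# A quick check of the fit, mirroring the prediction loops used in the later
# examples (a sketch; it only assumes the trained `net`, `inputs` and `targets` above):
for x, y in zip(inputs, targets):
    print(x, net.forward(x), y)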


Example #2
#     reLu(),
#     Linear(input_size=16, output_size=24),
#     reLu(),
#     Linear(input_size=24, output_size=20),
#     reLu(),
#     Linear(input_size=20, output_size=24),
#     reLu(),
#     Linear(input_size=24, output_size=1),
#     Sigmoid(),
#     Linear(input_size=1, output_size=1)
# ])
net = NeuralNet([
    Linear(input_size=30, output_size=24),
    Tanh(),
    Linear(input_size=24, output_size=30),
    Tanh(),
    Linear(input_size=30, output_size=35),
    Tanh(),
    Linear(input_size=35, output_size=1),
    Sigmoid()
])

n_epochs = 200
loss_list = train(net,
                  inputs,
                  targets,
                  optimizer=Adam(lr=1e-2, gamma1=0.3, gamma2=0.4),
                  iterator=BatchIterator(128),
                  num_epochs=n_epochs)

y_pred = []
for x in X_test[0:1000]:
    # loop body truncated in the original; presumably it collects the net's predictions
    y_pred.append(net.forward(x))
Example #3
        return [0, 1, 0, 0]
    else:
        return [1, 0, 0, 0]
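
# Only the tail of fizz_buzz_encode survives above. Given the one-hot convention
# [number, fizz, buzz, fizzbuzz] used by the labels list further down, the full
# function presumably looks like this sketch:
def fizz_buzz_encode(x: int) -> List[int]:
    if x % 15 == 0:
        return [0, 0, 0, 1]      # fizzbuzz
    elif x % 5 == 0:
        return [0, 0, 1, 0]      # buzz
    elif x % 3 == 0:
        return [0, 1, 0, 0]      # fizz
    else:
        return [1, 0, 0, 0]      # plain number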


def binary_encode(x: int) -> List[int]:
    """
    10-digit binary encoding of x, least-significant bit first
    """
    return [x >> i & 1 for i in range(10)]
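
# For example, 3 is encoded least-significant bit first:
assert binary_encode(3) == [1, 1, 0, 0, 0, 0, 0, 0, 0, 0]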


inputs = np.array([binary_encode(x) for x in range(101, 1024)])

targets = np.array([fizz_buzz_encode(x) for x in range(101, 1024)])

net = NeuralNet([
    Linear(input_size=10, output_size=50),
    Tanh(),
    Linear(input_size=50, output_size=4)
])

train(net, inputs, targets, num_epochs=5000, optimizer=SGD(lr=0.001))

for x in range(1, 101):
    predicted = net.forward(binary_encode(x))
    predicted_idx = np.argmax(predicted)
    actual_idx = np.argmax(fizz_buzz_encode(x))
    labels = [str(x), "fizz", "buzz", "fizzbuzz"]
    print(x, labels[predicted_idx], labels[actual_idx])
Example #4
import numpy as np
import matplotlib.pyplot as plt
from joelnet.train import train
from joelnet.nn import NeuralNet
from joelnet.layers import Linear, Tanh, Sigmoid, reLu
from joelnet.data import BatchIterator
from joelnet.optim import SGD, RMSProp, SGD_Nesterov, Adam
from joelnet.loss import MSE, Log_loss
import random

inputs = np.array([[1], [2], [3], [4], [5]])

targets = np.array([[1], [4], [9], [16], [25]])

net = NeuralNet([
    Linear(input_size=1, output_size=2),
    reLu(),
    Linear(input_size=2, output_size=1)
])

n_epochs = 1
#loss_list = train(net, inputs,targets, optimizer = Adam(lr = 1e-2, gamma1 = 0.3, gamma2 = 0.3),iterator = BatchIterator(batch_size = 5), num_epochs = 1000)
loss_list = train(net,
                  inputs,
                  targets,
                  loss=MSE(),
                  optimizer=SGD(lr=1e-3),
                  iterator=BatchIterator(batch_size=5),
                  num_epochs=n_epochs)
for x, y in zip(inputs, targets):
    predicted = net.forward(x)
    print(x, predicted, y)
Example #5
"""
The canonical example of a function that can't
be learned with a simple linear model is XOR
"""

import numpy as np

from joelnet.train import train
from joelnet.nn import NeuralNet
from joelnet.layers import Linear, Tanh

inputs = np.array([[0, 0], [1, 0], [0, 1], [1, 1]])

targets = np.array([[1, 0], [0, 1], [0, 1], [1, 0]])

net = NeuralNet([
    Linear(input_size=2, output_size=2),
    Tanh(),
    Linear(input_size=2, output_size=2)
])

train(net, inputs, targets)

for x, y in zip(inputs, targets):
    predicted = net.forward(x)
    print(x, predicted, y)
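
# To see the claim from the module docstring in action: without the Tanh layer the
# two Linear layers compose into a single affine map, so a linear-only net cannot
# fit XOR and its loss plateaus. A sketch reusing NeuralNet/Linear/train from above
# (`linear_net` is just an illustrative name):
linear_net = NeuralNet([
    Linear(input_size=2, output_size=2)
])
train(linear_net, inputs, targets)
for x, y in zip(inputs, targets):
    print(x, linear_net.forward(x), y)   # predictions stay far from the one-hot targets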
Example #6
from joelnet.layers import Sigmoid  # Sigmoid layer defined by Tim, Sept 16

# logical xor is defined as:
#   xor(bool1,bool2) := false if bool1==bool2 else true
inputs = np.array([[0, 0], [1, 0], [0, 1], [1, 1]])
targets = np.array([[1, 0], [0, 1], [0, 1], [1, 0]])

# Alternative: use the Sigmoid activation layer implemented above
# (not quite as accurate as Tanh here, but pretty close); see the sketch below.
# Sigmoid()  # instead of Tanh()
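
# A sketch of that variant, using the same layer API as the net below
# (`sigmoid_net` is just an illustrative name; it is not trained in this example):
sigmoid_net = NeuralNet([
    Linear(input_size=2, output_size=2),
    Sigmoid(),
    Linear(input_size=2, output_size=2)
])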

# instantiate the net (supply layers as iterable, here a list)
net = NeuralNet([
    # layer 1: input Linear layer, takes the 2 XOR inputs and outputs 2 hidden units
    Linear(input_size=2, output_size=2),
    # layer 2: activation layer, hyperbolic tangent
    Tanh(),
    # layer 3: output Linear layer, returns 2 values, one per one-hot class
    Linear(input_size=2, output_size=2)
])

# check out attrs of the net + its class
#   inspect.signature(net.backward)
#   vars(NeuralNet)
#
# NeuralNet methods:
#   - .forward(inputs): propagate inputs to the next layer
#   - .backward(grad): propagate gradients to the previous layer
#   - .params_and_grads(): generator yielding params and gradients
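#
# As a rough sketch of what those methods let you do by hand (assuming an MSE-style
# loss and a learning rate lr; this is not part of the example below), one plain
# gradient-descent step looks like:
#
#     predicted = net.forward(inputs)
#     grad = 2 * (predicted - targets)          # gradient of the squared-error loss
#     net.backward(grad)
#     for param, grad in net.params_and_grads():
#         param -= lr * grad                    # in-place parameter update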
train(net, inputs, targets)

for x, y in zip(inputs, targets):
    # body truncated in the original; presumably the same check as in the other examples
    print(x, net.forward(x), y)
Example #7
eps = 5
inputs = []
for j in range(5):
    inputs.append([j])
inputs = np.array(inputs)
targets = inputs**2

np.random.seed(20)

net = NeuralNet([
    Linear(input_size=1,
           output_size=2,
           weights=np.random.randn(1, 2),
           biases=np.random.randn(2)),
    reLu(),
    Linear(input_size=2,
           output_size=2,
           weights=np.random.randn(2, 2),
           biases=np.random.randn(2)),
    reLu(),
    Linear(input_size=2,
           output_size=1,
           weights=np.random.randn(2, 1),
           biases=np.random.randn(1))
])

start_time = time.time()
try:
    loss_list, eval_list = train(net,
                                 inputs,
                                 targets,
                                 loss=MSE(),
                                 optimizer=LM_cond(1e15),
Example #8
y_test = np.array(y_test)

inputs = X_train
targets = np.array(y_train)
np.random.seed(2)
net = NeuralNet([
    Linear(input_size=30,
           output_size=24,
           weights=np.random.randn(30, 24),
           biases=np.random.randn(24)),
    Tanh(),
    Linear(input_size=24,
           output_size=30,
           weights=np.random.randn(24, 30),
           biases=np.random.randn(30)),
    Tanh(),
    Linear(input_size=30,
           output_size=35,
           weights=np.random.randn(30, 35),
           biases=np.random.randn(35)),
    Tanh(),
    Linear(input_size=35,
           output_size=1,
           weights=np.random.randn(35, 1),
           biases=np.random.randn(1)),
    Sigmoid()
])

# net = NeuralNet([
#     Linear(input_size=30, output_size=2),
#     Tanh(),
#     Linear(input_size=2, output_size=1),
Example #9
def binary_encode(x: int, num_bits: int) -> List[int]:
    # def line reconstructed from the binary_encode(x, NUM_ENCODE_BITS) calls below
    return [x >> i & 1 for i in range(num_bits)]

NUM_ENCODE_BITS = 10
NUM_EPOCHS = 10000
inputs = np.array([
    binary_encode(x, NUM_ENCODE_BITS)
    for x in range(101, 1024)
])

targets = np.array([
    fizz_buzz_encode(x)
    for x in range(101, 1024)
])

net = NeuralNet([
    Linear(input_size=NUM_ENCODE_BITS, output_size=50),
    Tanh(),
    Linear(input_size=50, output_size=4)
])

train(net=net,
      inputs=inputs,
      targets=targets,
      num_epochs=NUM_EPOCHS,
      optimizer=SGD(lr=0.001))

for x in range(1, 101):
    predicted = net.forward(inputs=binary_encode(x, NUM_ENCODE_BITS))
    predicted_idx = np.argmax(predicted) # largest value is predicted class
    actual_idx = np.argmax(fizz_buzz_encode(x))
    labels = [str(x), 'fizz', 'buzz', 'fizzbuzz']
    print(x, labels[predicted_idx], labels[actual_idx])
Example #10
import numpy as np
import matplotlib.pyplot as plt
from joelnet.train import train
from joelnet.nn import NeuralNet
from joelnet.layers import Linear, Tanh, Sigmoid, reLu
from joelnet.optim import Optimizer, SGD, Adam
from joelnet.data import BatchIterator
from joelnet.loss import Log_loss

inputs = np.array([[0, 0], [1, 0], [0, 1], [1, 1]])

targets = np.array([[0], [1], [1], [0]])

net = NeuralNet([
    Linear(input_size=2, output_size=4),
    Sigmoid(),
    Linear(input_size=4, output_size=4),
    Sigmoid(),
    Linear(input_size=4, output_size=1),
    Sigmoid()
])

n_epochs = 10000
loss_list = train(net,
                  inputs,
                  targets,
                  loss=Log_loss(),
                  optimizer=SGD(lr=1e-5),
                  iterator=BatchIterator(4),
                  num_epochs=n_epochs)

for x, y in zip(inputs, targets):
    predicted = net.forward(x)