Example #1
    def test_xor_gate(self):
        """Train on the XOR truth table and check all four input pairs."""
        inputs = np.array([[1.0, 1.0],
                           [1.0, 0.0],
                           [0.0, 1.0],
                           [0.0, 0.0]], dtype='float32')
        output_vector = np.array([[0.0],
                                  [1.0],
                                  [1.0],
                                  [0.0]])
        net = NeuralNetwork(inputs, output_vector)
        net.train()
        # places=1 tolerates a small residual training error
        for pattern, expected in [([0, 1], 1), ([1, 0], 1),
                                  ([0, 0], 0), ([1, 1], 0)]:
            output = net.feed(np.array([pattern], dtype='float32'))[0][0]
            self.assertAlmostEqual(output, expected, places=1)
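The NeuralNetwork class this test drives is not shown. A minimal sketch consistent with its constructor, train(), and feed() calls (the hidden-layer size, learning rate, and iteration count are assumptions, not the original implementation):

import numpy as np

class NeuralNetwork:
    """Two-layer sigmoid network trained by batch gradient descent."""

    def __init__(self, inputs, targets, hidden=4, seed=0):
        rng = np.random.default_rng(seed)
        self.x, self.y = inputs, targets
        self.w1 = rng.normal(0, 1, (inputs.shape[1], hidden))
        self.b1 = np.zeros(hidden)
        self.w2 = rng.normal(0, 1, (hidden, targets.shape[1]))
        self.b2 = np.zeros(targets.shape[1])

    @staticmethod
    def _sigmoid(z):
        return 1.0 / (1.0 + np.exp(-z))

    def feed(self, x):
        # Forward pass; the hidden activation is cached for backprop.
        self._h = self._sigmoid(x @ self.w1 + self.b1)
        return self._sigmoid(self._h @ self.w2 + self.b2)

    def train(self, iterations=20000, lr=0.5):
        for _ in range(iterations):
            out = self.feed(self.x)
            # Backpropagate the squared-error gradient through both layers.
            d_out = (out - self.y) * out * (1 - out)
            d_hid = (d_out @ self.w2.T) * self._h * (1 - self._h)
            self.w2 -= lr * self._h.T @ d_out
            self.b2 -= lr * d_out.sum(axis=0)
            self.w1 -= lr * self.x.T @ d_hid
            self.b1 -= lr * d_hid.sum(axis=0)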
Example #2
def main():
    # load data
    _input = torch.tensor(data.training_input, dtype=torch.float)
    _output = torch.tensor(data.training_expected_output, dtype=torch.float)

    # This section is for plotting ##############################
    gene_array = []
    loss_array = []
    fig, ax = plt.subplots()
    ax.set(xlabel='generation',
           ylabel='mean squared error',
           title='Neural network: training loss per generation')
    # End of plotting section ###################################

    ANN = NeuralNetwork(i=45, o=10, h=5)  # input, output, and hidden layer sizes
    # weight training
    for i in range(15000):
        # mean squared error over the whole batch, detached for logging
        mean_error = torch.mean((_output - ANN(_input))**2).detach().item()
        print(f"Generation: {i} error: {mean_error}")
        gene_array.append(i)
        loss_array.append(mean_error)
        ANN.train(_input, _output)

    torch.save(ANN, "algo1.weights")
    # Testing loads a previously saved checkpoint, not the weights just saved.
    ANN = torch.load("14_good.weights")
    test_trained_network(ANN)
    ax.plot(gene_array, loss_array)
    plt.show()
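NeuralNetwork here is the author's module and is not shown. A minimal sketch of a torch module with a matching train(input, output) step, assuming MSE loss and plain SGD (layer sizes, activations, and learning rate are illustrative):

import torch

class NeuralNetwork(torch.nn.Module):
    def __init__(self, i, o, h):
        super().__init__()
        self.layers = torch.nn.Sequential(
            torch.nn.Linear(i, h), torch.nn.Sigmoid(),
            torch.nn.Linear(h, o), torch.nn.Sigmoid())
        self.optimizer = torch.optim.SGD(self.parameters(), lr=0.1)
        self.loss_fn = torch.nn.MSELoss()

    def forward(self, x):
        return self.layers(x)

    def train(self, x, y):
        # One gradient step on the whole batch. Note this shadows
        # Module.train(mode), which the calling code above implies.
        self.optimizer.zero_grad()
        loss = self.loss_fn(self(x), y)
        loss.backward()
        self.optimizer.step()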
Example #3
def main():
    inputs = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
    labels = np.array([[0], [1], [1], [0]])

    architecture = (2, 3, 1)
    nn = NeuralNetwork(architecture, activation="sigmoid", cost="mse")

    costs = nn.train(inputs, labels, alpha=1, iterations=5000)
    print("\nCost always decreases:",
          all([costs[i + 1] < costs[i] for i in range(len(costs) - 1)]))
    print("Lowest cost:", min(costs))
    print("\nResults:", "\n", np.round(nn.evaluate(inputs), 0))
    plotCosts(costs)

    # GRAPHING
    plt.style.use(["dark_background"])
    #plt.rc("grid", alpha=0.25)

    start, end = -0.5, 1.5

    fidelity = 0.01
    n = int((end - start) / fidelity)
    points = np.meshgrid(np.linspace(start, end, n + 1),
                         np.linspace(start, end, n + 1))
    values = np.zeros((n + 1, n + 1))

    for i in range(n + 1):
        for j in range(n + 1):
            values[i, j] = nn.evaluate(
                np.array([points[0][i, j], points[1][i, j]]))

    x, y = points
    # "RdYlGn" is an alternative colormap here
    plt.contourf(x, y, values, np.linspace(0, 1, 51), cmap="jet_r")
    plt.colorbar()
    # Dashed contour at the 0.5 level marks the learned decision boundary
    plt.contour(x, y, values, levels=[0.5],
                linewidths=2, linestyles="dashed", colors="black")

    plt.grid(color="black", alpha=0.25)
    plt.axhline(y=0, color="k", lw=2)
    plt.axvline(x=0, color="k", lw=2)
    plt.title("Neural Network XOR Boundary")
    plt.xlabel("X Axis")
    plt.ylabel("Y Axis")
    plt.show()
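The nested loops above call nn.evaluate one grid point at a time. If nn.evaluate accepts a batch of rows (an assumption about the author's API), the grid can be evaluated in a single call:

# Flatten the grid to an (n+1)^2 x 2 batch, evaluate once, reshape back.
grid = np.column_stack([points[0].ravel(), points[1].ravel()])
values = nn.evaluate(grid).reshape(points[0].shape)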
Example #4
def main(mnist_path, output_path, activation, hl_sizes):
    with gzip.open(mnist_path, 'rb') as f:
        training_set, test_set = pickle.load(f, encoding='latin1')

    training_examples = transform_examples(training_set)
    test_examples = transform_examples(test_set)

    if activation == "sigmoid":
        activation = neural_net.sigmoid
        d_activation = neural_net.d_sigmoid
    elif activation == "relu":
        activation = neural_net.relu
        d_activation = neural_net.d_relu
    elif activation == "elu":
        activation = neural_net.elu
        d_activation = neural_net.d_elu

    network = NeuralNetwork([PIXEL_COUNT] + hl_sizes + [NUM_CHARACTERS],
                            output_path,
                            act_func=activation,
                            act_func_deriv=d_activation)
    network.train(training_generator(training_examples), test_examples)

    with open(output_path, 'wb') as out:
        pickle.dump(network, out)
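transform_examples and training_generator are the author's helpers and are not shown. A plausible sketch of the first, assuming each set is an (images, labels) pair and the network expects (pixel vector, one-hot target) pairs; the constant values are assumptions:

import numpy as np

NUM_CHARACTERS = 10  # assumed digit classes; PIXEL_COUNT would be 784 for MNIST

def transform_examples(dataset):
    """Pair each image row with a one-hot label vector."""
    images, labels = dataset
    examples = []
    for image, label in zip(images, labels):
        target = np.zeros(NUM_CHARACTERS)
        target[label] = 1.0
        examples.append((image, target))
    return examples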
Example #5
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score
from sklearn.preprocessing import MinMaxScaler
from neural_net import NeuralNetwork

# 1-D artificial data
X = np.arange(0, 20).reshape(20, 1) + np.random.randn(20, 1)
y = (np.arange(0, 20) + np.random.randn(20)).reshape(20, 1)
# use separate scalers so each transform can be inverted later
x_scaler, y_scaler = MinMaxScaler(), MinMaxScaler()
X = x_scaler.fit_transform(X)
y = y_scaler.fit_transform(y)
X_train, X_test, y_train, y_test = train_test_split(X,
                                                    y,
                                                    test_size=0.3,
                                                    random_state=101)
plt.plot(X, y, '*', label='data')
plt.xlabel('features')
plt.ylabel('labels')
plt.title('DNN Regressor')

nn = NeuralNetwork(mode='regression')
nn.train(X_train, y_train, 1000)
y_pred = nn.predict(X_test)
print()
r2 = r2_score(y_test, y_pred)
print('The R^2 score is:', r2)

plt.plot(X_test, y_pred, 'r*', label='test predictions')
plt.legend()
plt.show()
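If predictions are needed in the original units, the fitted y scaler can invert the min-max scaling (assuming nn.predict returns a column vector shaped like y_test):

# Map scaled predictions back to the original target range.
y_pred_orig = y_scaler.inverse_transform(y_pred)
print('De-scaled predictions:', y_pred_orig.ravel())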
Example #6

import numpy as np
import matplotlib.pyplot as plt
# NeuralNetwork comes from the author's module (a sketch follows this example)


# functions
def n():
    return float(input('Enter a number: '))


# input data
inputs = np.array([[0, 1, 0], [0, 0, 0], [0, 1, 1], [0, 0, 0], [1, 0, 0],
                   [1, 1, 1], [1, 0, 1]])
# output data
outputs = np.array([[0], [0], [0], [0], [1], [1], [1]])

# create a neural network
NeuralN = NeuralNetwork(inputs, outputs)
NeuralN.train()

# create two new examples to test and predict
example1 = np.array([[1, 1, 0]])
example2 = np.array([[0, 1, 1]])
exampleUser = np.array([[n(), n(), n()]])

# print predictions; the expected output equals the first input element
print(NeuralN.predict(example1), '- Correct: ', example1[0][0])
print(NeuralN.predict(example2), '- Correct: ', example2[0][0])
print(NeuralN.predict(exampleUser), '- Correct: ', exampleUser[0][0])

# plot the error over the entire training duration
plt.figure(figsize=(15, 5))
plt.plot(NeuralN.iters_hist, NeuralN.costerror_hist)
plt.xlabel('Iters')
plt.ylabel('Cost error')
plt.show()
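The NeuralNetwork used above is not shown. A minimal sketch consistent with its (inputs, outputs) constructor, train(), predict(), and the iters_hist/costerror_hist attributes (a single sigmoid neuron; every detail here is an assumption):

import numpy as np

class NeuralNetwork:
    """Single sigmoid neuron trained by batch gradient descent."""

    def __init__(self, inputs, outputs, seed=1):
        self.inputs, self.outputs = inputs, outputs
        rng = np.random.default_rng(seed)
        self.weights = rng.uniform(-1, 1, (inputs.shape[1], 1))
        self.iters_hist, self.costerror_hist = [], []

    def predict(self, x):
        return 1 / (1 + np.exp(-(x @ self.weights)))

    def train(self, iterations=25000):
        for it in range(iterations):
            out = self.predict(self.inputs)
            error = self.outputs - out
            # Gradient step on the squared error through the sigmoid.
            self.weights += self.inputs.T @ (error * out * (1 - out))
            self.iters_hist.append(it)
            self.costerror_hist.append(np.mean(error ** 2))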
Example #7
from neural_net import NeuralNetwork
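# Assumed argument roles: training CSV path, a name for the saved model,
# and the task type.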
nn = NeuralNetwork("parkinsons_supervised.csv",
                   "initial_model",
                   model_type='regression')
nn.train()
Example #8
years, spots = split_data(data)

# normalise data
norm_spots = norm(spots, 0, 1)

# split for training and validation
train_spots = norm_spots[:200]
valid_spots = norm_spots[200:]

# convert data to matrices, ready for training
training_out = np.array(train_spots[N:]).reshape(-1, 1)
training_in = create_input_matrix(train_spots, N)

# learn
neur_net = NeuralNetwork(N)
neur_net.train(training_in, training_out, 100000, 0.0001)
print("Weights after training:")
print(neur_net.synaptic_weights)

# test data preparation 
valid_test_out = np.array(valid_spots[N:]).reshape(-1, 1)
valid_test_in = create_input_matrix(valid_spots, N)

# compute validation error
res = []
for i, line in enumerate(valid_test_in):
    prediction = neur_net.think(line)
    error = valid_test_out[i] - prediction
    res.append(error**2)
    print(denorm(valid_test_out[i], spots), denorm(prediction, spots))
print("Mean squared validation error:", np.mean(res))
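norm, denorm, and create_input_matrix are helpers that are not shown. A plausible sketch, assuming min-max scaling and sliding windows of the previous N values as network inputs:

import numpy as np

def norm(values, lo, hi):
    """Min-max scale values into [lo, hi]."""
    values = np.asarray(values, dtype=float)
    scaled = (values - values.min()) / (values.max() - values.min())
    return scaled * (hi - lo) + lo

def denorm(scaled, original):
    """Map a [0, 1]-scaled value back to the original data range."""
    original = np.asarray(original, dtype=float)
    return scaled * (original.max() - original.min()) + original.min()

def create_input_matrix(series, n):
    """Each row holds the n values that precede one target point."""
    return np.array([series[i:i + n] for i in range(len(series) - n)])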