Example #1
def crossValidation(trainingData,
                    atr,
                    days,
                    DAYS_AHEAD,
                    intervals=10,
                    EPOCHS=300,
                    LEARNING_RATE=0.2):
    reshaped_x = np.reshape(trainingData, (len(trainingData), 1, atr * days))
    reshaped_y = np.reshape(trainingData[:, 2], (len(trainingData), 1, 1))

    x = makeBatch(reshaped_x, intervals)
    y = makeBatch(reshaped_y, intervals)

    errors = []
    for i in range(len(x)):
        x_train = getSelection(x, i)[:-DAYS_AHEAD]
        y_train = getSelection(y, i)[DAYS_AHEAD:]

        x_test = x[i][:-DAYS_AHEAD]
        y_test = y[i][DAYS_AHEAD:]

        inputSize = atr * days  # must match the flattened feature width used in the reshape above
        outputSize = int(inputSize / 2)
        net = Network()
        net.add(FCLayer(inputSize, outputSize * 3))
        net.add(ActivationLayer(tanh, tanh_prime))
        net.add(FCLayer(outputSize * 3, outputSize))
        net.add(ActivationLayer(tanh, tanh_prime))
        net.add(FCLayer(outputSize, 1))
        net.add(ActivationLayer(tanh, tanh_prime))

        # train
        net.use(mse, mse_prime)
        net.fit(x_train, y_train, epochs=EPOCHS, learning_rate=LEARNING_RATE)

        # test
        out, err = net.predict(x_test, y_test)
        errors.append(err)
        print(err)
    print(np.average(errors))
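makeBatch and getSelection are project helpers that the snippet does not show. Judging by their use above, makeBatch splits the samples into `intervals` folds and getSelection concatenates every fold except the held-out one; a minimal sketch under that assumption:

def makeBatch(data, intervals):
    # split into `intervals` roughly equal folds
    return np.array_split(data, intervals)

def getSelection(batches, i):
    # training folds: everything except the i-th (held-out) fold
    return np.concatenate(batches[:i] + batches[i + 1:])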
Example #2
import numpy as np

from network import Network
from fc_layer import FCLayer
from activation_layer import ActivationLayer
#from activations import tanh, tanh_prime
from losses import mse, mse_prime
from activations import sigmoid, sigmoid_prime

# training data
x_train = np.array([[[0, 0]], [[0, 1]], [[1, 0]], [[1, 1]]])
y_train = np.array([[[0]], [[1]], [[1]], [[0]]])

# network
net = Network()
net.add(FCLayer(2, 3))
net.add(ActivationLayer(sigmoid, sigmoid_prime))
net.add(FCLayer(3, 1))
net.add(ActivationLayer(sigmoid, sigmoid_prime))

# train
net.use(mse, mse_prime)
cost_, myerr = net.fit(x_train, y_train, epochs=10000, learning_rate=0.2)

# test
out = net.predict(x_train)
print(out)

import matplotlib.pyplot as plt
plt.plot(cost_)  # training cost per epoch
plt.show()
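All of the snippets on this page drive the same kind of small from-scratch network library (Network, FCLayer, ActivationLayer plus activations and losses modules), which the listing never shows. Below is a minimal sketch of the interface they assume, not the exact code of any one project: individual variants differ (fit returning a cost history, predict taking a y argument and returning an error, ActivationLayer taking a single function, or the cupy-based test variant further down exposing W, b, dW, db and an update_parameters method).

import numpy as np

class FCLayer:
    # fully connected layer: output = input @ weights + bias
    def __init__(self, input_size, output_size):
        self.weights = np.random.rand(input_size, output_size) - 0.5
        self.bias = np.random.rand(1, output_size) - 0.5

    def forward(self, x):  # x: (1, input_size)
        self.x = x
        return x @ self.weights + self.bias

    def backward(self, grad, lr):  # grad: dE/d(output)
        dx = grad @ self.weights.T
        self.weights -= lr * self.x.T @ grad
        self.bias -= lr * grad
        return dx

class ActivationLayer:
    # elementwise activation together with its derivative
    def __init__(self, f, f_prime):
        self.f, self.f_prime = f, f_prime

    def forward(self, x):
        self.x = x
        return self.f(x)

    def backward(self, grad, lr):
        return self.f_prime(self.x) * grad

class Network:
    def __init__(self):
        self.layers = []

    def add(self, layer):
        self.layers.append(layer)

    def use(self, loss, loss_prime):
        self.loss, self.loss_prime = loss, loss_prime

    def predict(self, x_test):
        result = []
        for x in x_test:
            for layer in self.layers:
                x = layer.forward(x)
            result.append(x)
        return result

    def fit(self, x_train, y_train, epochs, learning_rate):
        # plain per-sample gradient descent
        for _ in range(epochs):
            for x, y in zip(x_train, y_train):
                for layer in self.layers:
                    x = layer.forward(x)
                grad = self.loss_prime(y, x)
                for layer in reversed(self.layers):
                    grad = layer.backward(grad, learning_rate)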
Example #3
# (snippet starts mid-script: x_train/y_train here come from an MNIST
# loader, and np_utils is keras's np_utils)
x_train = x_train.reshape(x_train.shape[0], 1, 28 * 28)
x_train = x_train.astype('float32')
x_train /= 255
# encode output which is a number in range [0,9] into a vector of size 10
# e.g. number 3 will become [0, 0, 0, 1, 0, 0, 0, 0, 0, 0]
y_train = np_utils.to_categorical(y_train)

# same for test data : 10000 samples
x_test = x_test.reshape(x_test.shape[0], 1, 28 * 28)
x_test = x_test.astype('float32')
x_test /= 255
y_test = np_utils.to_categorical(y_test)

# Network
net = Network()
net.add(FCLayer(28 * 28,
                100))  # input_shape=(1, 28*28)    ;   output_shape=(1, 100)
net.add(ActivationLayer(tanh, tanh_prime))
net.add(FCLayer(100, 50))  # input_shape=(1, 100)      ;   output_shape=(1, 50)
net.add(ActivationLayer(tanh, tanh_prime))
net.add(FCLayer(50, 10))  # input_shape=(1, 50)       ;   output_shape=(1, 10)
net.add(ActivationLayer(tanh, tanh_prime))

# train on 1000 samples
# as we didn't implement mini-batch GD, training will be pretty slow if we update at each iteration on all 60000 samples...
net.use(mse, mse_prime)
net.fit(x_train[0:1000], y_train[0:1000], epochs=50, learning_rate=0.01)

# test on 3 samples
out = net.predict(x_test[0:3])
print("\n")
print("predicted values : ")
Example #4
def visualise_image(data, num_image):
    image = np.asarray(data[num_image].squeeze())
    plt.imshow(image)
    plt.show()


x_train = import_image('train-images-idx3-ubyte.gz', 6000)
x_train /= 255
y_train = import_labels('train-labels-idx1-ubyte.gz', 6000)
x_test = import_image('t10k-images-idx3-ubyte.gz', 1000)
x_test /= 255
y_test = import_labels('t10k-labels-idx1-ubyte.gz', 1000)

y_train = to_categorical(y_train)  #copied from tensorflow.keras.np_utils
y_test = to_categorical(y_test)

#x_train = np.asarray(x_train)

#training
net = Network()
net.add(FCLayer(image_size**2,
                100))  # input (1, image_size**2), output (1, 100)
net.add(ActivationLayer(tanh, tanh_prime))
net.add(FCLayer(100, 50))  #input (1,100 )  , output (1, 50)
net.add(ActivationLayer(tanh, tanh_prime))
net.add(FCLayer(50, 10))  #input (1,50) ,   output(1,10)
net.add(ActivationLayer(tanh, tanh_prime))

net.use(mse, mse_prime)
net.fit(x_train[0:1000], y_train[0:1000], epochs=35, learning_rate=0.1)
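import_image and import_labels are project helpers not shown in the snippet. Assuming they read the standard gzipped IDX files (16-byte header for images, 8-byte header for labels), a minimal sketch:

import gzip
import numpy as np

image_size = 28  # assumed; it matches FCLayer(image_size**2, 100) above

def import_image(path, num_images):
    # IDX image files: 16-byte header, then uint8 pixels
    with gzip.open(path, 'rb') as f:
        f.read(16)
        buf = f.read(image_size * image_size * num_images)
    data = np.frombuffer(buf, dtype=np.uint8).astype('float32')
    return data.reshape(num_images, 1, image_size * image_size)

def import_labels(path, num_labels):
    # IDX label files: 8-byte header, then one uint8 per label
    with gzip.open(path, 'rb') as f:
        f.read(8)
        buf = f.read(num_labels)
    return np.frombuffer(buf, dtype=np.uint8)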
Example #5
def quicksort(t):
    # sorts [error, network] pairs by their first element
    if len(t) <= 1:
        return t
    pivot = t[0]
    t1, t2 = [], []
    for k in range(1, len(t)):
        if t[k][0] < pivot[0]:
            t1.append(t[k])
        else:
            t2.append(t[k])
    return quicksort(t1) + [pivot] + quicksort(t2)


## init
population = 10  # total number of candidates evaluated at the same time
nombre = 1000  # number of generations
q = 0.3  # mutation factor (between 0 and 1)

x_train = np.array([[[0, 0]], [[0, 1]], [[1, 0]],
                    [[1, 1]]])  # the inputs
## random initialisation of the neural networks
liste = []
for k in range(0, population):  # create "population" neural networks
    liste.append([0, Network()])
    liste[-1][1].add(FCLayer(2, 3))  # build the layers
    liste[-1][1].add(ActivationLayer(tanh))  # choice of activation function
    liste[-1][1].add(FCLayer(3, 3))
    liste[-1][1].add(ActivationLayer(tanh))
    liste[-1][1].add(FCLayer(3, 1))
    liste[-1][1].add(ActivationLayer(tanh))
n = 0
while (n < nombre):  # run until the "nombre"-th generation
    n += 1
    for k in range(0, population):  # for every neural network
        y_trouve = []
        y_voulu = []
        for x in x_train:  # feed the 4 inputs
            y = liste[k][1].predict(x)  # run the network
            y_attendu = x[0][0] ^ x[0][1]  # the value we should get (XOR)
            y_voulu.append(y_attendu)  # store the expected results
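The fragment is cut off before the evaluation and breeding step. A plausible continuation, given q and the quicksort above, scores each network by its XOR error, sorts the population, and perturbs the survivors; the helper below is hypothetical (the original code is not shown) and assumes the weights/bias attributes from the library sketch earlier.

def mutate(net, q):
    # hypothetical helper: jitter every FC layer's parameters by the
    # mutation factor q
    for layer in net.layers:
        if hasattr(layer, 'weights'):
            layer.weights += q * np.random.randn(*layer.weights.shape)
            layer.bias += q * np.random.randn(*layer.bias.shape)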
Example #6
x = df[ls[1:-1]].to_numpy()[:, np.newaxis]
y = df[ls[-1]].to_numpy().reshape(-1, 1)
#Split into training and testing data
x_train, x_test, y_train, y_test = train_test_split(x,
                                                    y,
                                                    test_size=0.33,
                                                    random_state=42)
x_train, x_val, y_train, y_val = train_test_split(
    x_train, y_train, test_size=0.15,
    random_state=42)  #Validation data of 10% (15% of 67%)
np.random.seed(42)
#Initialise Neural Network
net = Network()
n_hidden_nodes = 500
#Input layer
net.add(FCLayer(x.shape[-1], n_hidden_nodes))
net.add(ActivationLayer(tanh, tanh_prime))
#hidden layers
net.add(FCLayer(n_hidden_nodes, n_hidden_nodes))
net.add(ActivationLayer(tanh, tanh_prime))
net.add(FCLayer(n_hidden_nodes, n_hidden_nodes))
net.add(ActivationLayer(tanh, tanh_prime))
#Output layer
net.add(FCLayer(n_hidden_nodes, 1))
#No activation on the output layer: linear output for regression
net.use(mse, mse_prime)
net.fit(x_train, y_train, epochs=100, learning_rate=0.001)

y_pred = np.array(net.predict(x_test)).reshape(-1, 1)

np.savetxt('data_test500Trial.csv', y_pred, delimiter=',')
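The snippet saves the raw predictions without scoring them. Since scikit-learn is already in use (train_test_split above), a quick check against y_test could look like:

from sklearn.metrics import mean_squared_error, r2_score

print("test MSE:", mean_squared_error(y_test, y_pred))
print("test R^2:", r2_score(y_test, y_pred))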
Example #7
    def test_update_parameters(self):
        # test example from coursera deeplearning.ai
        np.random.seed(1)
        W1 = cp.array(np.random.randn(2, 3))
        b1 = cp.array(np.random.randn(2, 1))
        W2 = cp.array(np.random.randn(3, 3))
        b2 = cp.array(np.random.randn(3, 1))
        dW1 = cp.array(np.random.randn(2, 3))
        db1 = cp.array(np.random.randn(2, 1))
        dW2 = cp.array(np.random.randn(3, 3))
        db2 = cp.array(np.random.randn(3, 1))

        l1 = FCLayer(3, 2)
        l1.W = W1
        l1.b = b1
        l1.dW = dW1
        l1.db = db1
        l2 = FCLayer(3, 3)
        l2.W = W2
        l2.b = b2
        l2.dW = dW2
        l2.db = db2
        # update_parameters(learning_rate, t): the timestep argument and the
        # sb attribute asserted below suggest Adam-style optimiser state
        l1.update_parameters(0.01, 2)
        l2.update_parameters(0.01, 2)
        self.assertAlmostEqual(float(l1.W[0, 0]), 1.63178673)
        self.assertAlmostEqual(float(l2.sb[0, 0]), 5.49507194e-05)
        self.assertAlmostEqual(float(l1.b[1, 0]), -0.75376553)
Example #8
    def test_init_parameters(self):
        np.random.seed(3)
        layer = FCLayer(2, 4)
Example #9
    def test_backward_propagation(self):
        # test example from coursera deeplearning.ai
        np.random.seed(3)
        AL = cp.array(np.random.randn(1, 2))
        Y = cp.array([[1, 0]])
        dA = -(Y / AL - (1 - Y) / (1 - AL))
        A1 = cp.array(np.random.randn(4, 2))
        W1 = cp.array(np.random.randn(3, 4))
        b1 = cp.array(np.random.randn(3, 1))
        Z1 = cp.array(np.random.randn(3, 2))

        A2 = cp.array(np.random.randn(3, 2))
        W2 = cp.array(np.random.randn(1, 3))
        b2 = cp.array(np.random.randn(1, 1))
        Z2 = cp.array(np.random.randn(1, 2))

        l1 = FCLayer(4, 3, activation='relu')
        l1.Z = Z1
        l1.W = W1
        l1.b = b1
        l1.X = A1
        l2 = FCLayer(3, 2, activation='sigmoid')
        l2.Z = Z2
        l2.W = W2
        l2.b = b2
        l2.X = A2

        dA = l2.backward(dA)
        dA = l1.backward(dA)
        self.assertAlmostEqual(0.41010002, float(l1.dW[0, 0]), places=7)
        self.assertAlmostEqual(0.01005865, float(l1.dW[2, 1]), places=7)
        self.assertAlmostEqual(-0.02835349, float(l1.db[2, 0]), places=7)
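For reference, the gradients asserted above follow the standard backprop identities in the deeplearning.ai convention (examples stored as columns, m of them):

# dZ = dA * g'(Z)                        (elementwise through the activation)
# dW = (1/m) * dZ @ X.T
# db = (1/m) * dZ.sum(axis=1, keepdims=True)
# dX = W.T @ dZ                          (handed to the previous layer)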
Example #10
    def test_forward_propagation(self):
        # test example from coursera deeplearning.ai
        np.random.seed(6)
        X = cp.array(np.random.randn(5, 4))
        W1 = cp.array(np.random.randn(4, 5))
        b1 = cp.array(np.random.randn(4, 1))
        W2 = cp.array(np.random.randn(3, 4))
        b2 = cp.array(np.random.randn(3, 1))
        W3 = cp.array(np.random.randn(1, 3))
        b3 = cp.array(np.random.randn(1, 1))

        lay1 = FCLayer(5, 4, activation='relu')
        lay1.W = W1
        lay1.b = b1
        lay2 = FCLayer(4, 3, activation='relu')
        lay2.W = W2
        lay2.b = b2
        lay3 = FCLayer(3, 1, activation='sigmoid')
        lay3.W = W3
        lay3.b = b3
        A = lay1.forward(X)
        A = lay2.forward(A)
        A = lay3.forward(A)
        self.assertAlmostEqual(0.03921668, float(A[0, 0]), places=7)
        self.assertAlmostEqual(0.19734387, float(A[0, 2]), places=7)
Example #11
x_train = np.reshape(trainingData,
                     (len(trainingData), 1, ATR * DAYS))[0:SIZE - DAYS_AHEAD]
y_train = np.reshape(trainingData[:, 2],
                     (len(trainingData), 1, 1))[DAYS_AHEAD:SIZE]
time_train = dataArray[DAYS_AHEAD:SIZE]
x_test = np.reshape(trainingData,
                    (len(trainingData), 1, ATR * DAYS))[SIZE:-DAYS_AHEAD]
y_test = np.reshape(trainingData[:, 2],
                    (len(trainingData), 1, 1))[SIZE + DAYS_AHEAD:]
time_test = dataArray[SIZE + DAYS + DAYS_AHEAD:]

# network
inputSize = ATR * DAYS
outputSize = int(inputSize / 2)
net = Network()
net.add(FCLayer(inputSize, outputSize * 3))
net.add(ActivationLayer(tanh, tanh_prime))
net.add(FCLayer(outputSize * 3, outputSize))
net.add(ActivationLayer(tanh, tanh_prime))
net.add(FCLayer(outputSize, 1))
net.add(ActivationLayer(tanh, tanh_prime))

# train
net.use(mse, mse_prime)
net.fit(x_train, y_train, epochs=EPOCHS, learning_rate=LEARNING_RATE)

# test
out, err = net.predict(x_test, y_test)
print(err)
outTrain, err = net.predict(x_train, y_train)
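The fragment stops right after the in-sample prediction. A typical next step, assuming matplotlib and that the time arrays prepared above line up with the targets, is to plot predictions against targets:

import matplotlib.pyplot as plt

plt.plot(time_train, np.ravel(y_train), label='train target')
plt.plot(time_train, np.ravel(outTrain), label='train prediction')
plt.plot(time_test, np.ravel(y_test), label='test target')
plt.plot(time_test, np.ravel(out), label='test prediction')
plt.legend()
plt.show()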
Example #12
import numpy as np

from network import Network
from fc_layer import FCLayer
from softmax_layer import SoftmaxLayer
from activation_layer import ActivationLayer
from activations import tanh, tanh_prime, softmax, softmax_prime
from losses import mse, mse_prime, cross_entropy, cross_entropy_prime

# TRAINING DATA
# input x_train shape is (4,1,2):
x_train = np.array([[[0, 0]], [[0, 1]], [[1, 0]], [[1, 1]]])
#y_train =np.array([ [[0]], [[1]], [[1]], [[0]] ])
y_train = np.array([[1, 0], [0, 1], [0, 1], [1, 0]])  # 1 hot vector

# NETWORK
net = Network()
net.add(FCLayer(2, 3))  # input has 2 dimensions, e.g. [0, 0]; 3 neurons
# the FC layer's activation is tanh; tanh_prime is its derivative:
net.add(ActivationLayer(tanh, tanh_prime))
#net.add(FCLayer(3, 1)) # input has 3 dimensions, 1 neuron
net.add(FCLayer(3, 2))  # input has 3 dimensions, 2 neurons
#net.add(ActivationLayer(tanh, tanh_prime))
net.add(ActivationLayer(softmax, softmax_prime))
"""
Thêm lớp ở trên thì bị lỗi sau:
Traceback (most recent call last):
  File "example_xor.py", line 36, in <module>
    net.fit(x_train, y_train, epochs=1000, learning_rate=0.1)
  File "E:\MyProg\Python\medium_nn\network.py", line 58, in fit
    error = layer.backward_propagation(error, learning_rate)
  File "E:\MyProg\Python\medium_nn\fc_layer.py", line 29, in backward_propagation
    weights_error = np.dot(self.input.T, output_error)
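The traceback quoted above is the classic shape mismatch that appears when softmax_prime returns the full Jacobian while ActivationLayer.backward expects an elementwise derivative. A common way around it is to pair softmax with cross-entropy, whose combined gradient needs no Jacobian at all:

# with softmax outputs and cross-entropy loss, d(loss)/d(logits)
# collapses to (prediction - target)
def softmax_cross_entropy_grad(y_true, y_pred):
    return y_pred - y_true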
Example #13
x_train = x_train.reshape(x_train.shape[0], 1, 28 * 28)
x_train = x_train.astype('float32')
x_train /= 255

#encoding output
y_train = np_utils.to_categorical(y_train)

#same for test data
x_test = x_test.reshape(x_test.shape[0], 1, 28 * 28)
x_test = x_test.astype('float32')
x_test /= 255

#encoding output
y_test = np_utils.to_categorical(y_test)

#Network
net = Network()
net.add(FCLayer(28 * 28, 50))
net.add(ActivationLayer(tanh, tanh_prime))
net.add(FCLayer(50, 10))
net.add(ActivationLayer(tanh, tanh_prime))

net.use(mse, mse_prime)
net.fit(x_train[:1000], y_train[:1000], epochs=50, learning_rate=0.1)

#test on 3 samples
out = net.predict(x_test[:1])
print('\n')
print('true values: ')
print(y_test[0:1])
print('\n')
print('predicted values: ')
print(out)
Example #14
# (snippet starts mid-script; x_train was reshaped to (28, 28, 1) and scaled
# the same way as x_test below)
y_train = np_utils.to_categorical(y_train)

# same for test data : 10000 samples
x_test = x_test.reshape(x_test.shape[0], 28, 28, 1)
x_test = x_test.astype('float32')
x_test /= 255
y_test = np_utils.to_categorical(y_test)

# Network
net = Network()
net.add(ConvLayer((28, 28, 1), (3, 3),
                  1))  # input_shape=(28, 28, 1)   ;   output_shape=(26, 26, 1)
net.add(ActivationLayer(tanh, tanh_prime))
net.add(
    FlattenLayer())  # input_shape=(26, 26, 1)   ;   output_shape=(1, 26*26*1)
net.add(FCLayer(26 * 26 * 1,
                100))  # input_shape=(1, 26*26*1)  ;   output_shape=(1, 100)
net.add(ActivationLayer(tanh, tanh_prime))
net.add(FCLayer(100, 10))  # input_shape=(1, 100)      ;   output_shape=(1, 10)
net.add(ActivationLayer(tanh, tanh_prime))

# train on 1000 samples
# as we didn't implement mini-batch GD, training will be pretty slow if we update at each iteration on all 60000 samples...
net.use(mse, mse_prime)
net.fit(x_train[0:1000], y_train[0:1000], epochs=100, learning_rate=0.1)

# test on 3 samples
out = net.predict(x_test[0:3])
print("\n")
print("predicted values : ")
print(out, end="\n")
print("true values : ")
Example #15
x_train = np.array([  # (state, input) pairs
    [[0, 0]],
    [[0, 1]],
    [[1, 0]],
    [[1, 1]],
    [[2, 0]],
    [[2, 1]],
    [[3, 0]],
    [[3, 1]]
])

# New state
y_train = np.array([[[1]], [[3]], [[2]], [[0]], [[3]], [[1]], [[0]], [[2]]])

# Network
net = Network()
net.add(FCLayer(2, 4))
net.add(ActivationLayer(tanh, tanh_prime))
net.add(FCLayer(4, 1))

# Train
net.use(mse, mse_prime)
net.fit(x_train, y_train, epochs=4000, learning_rate=0.02)

# Test
out = net.predict(x_train)
x = np.round(out)

# Take user input and perform state transitions
user_input = int(input("Enter the input (0 or 1 or -1):"))
current_state = 0
print("Current state is", current_state)
Example #16
     x_test = x_norm.to_numpy()
     y_true = data_test.drop(columns=[
         'Index', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11',
         '12', '13', '14', '15', '16', '17', '18', '19', '20', '21', '22',
         '23', '24', '25', '26', '27', '28', '29', '30'
     ]).to_numpy()
     y_true = np.where(y_true == 'M', 1, 0)
     y = data_train.drop(columns=[
         'Index', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11',
         '12', '13', '14', '15', '16', '17', '18', '19', '20', '21', '22',
         '23', '24', '25', '26', '27', '28', '29', '30'
     ]).to_numpy()
     y_train = np.where(y == 'M', 1, 0)
     # network
     net = Network()
     net.add(FCLayer(30, 15))
     net.add(ActivationLayer(tanh, tanh_prime))
     net.add(FCLayer(15, 10))
     net.add(ActivationLayer(tanh, tanh_prime))
     net.add(FCLayer(10, 1))
     net.add(ActivationLayer(tanh, tanh_prime))
     #train
     net.use(mse, mse_prime)
     net.fit(x_train, y_train, epochs=100, learning_rate=0.1)
     #test
     out = net.predict(x_test)
     print(out)
     y_pred = [1 if x > 0.5 else 0 for x in out]
     print("accuracy : ", accuracy_score_(y_pred, y_true))
 else:
     print("Usage : python Network.py train.csv test.csv")