def test_forward():
    n = 100
    p = 4
    x = np.ones([p, n])
    net = NeuralNetwork(p, [(10, Sigmoid()), (11, Sigmoid()), (1, Identity())], QuadraticLoss())
    output = net(x)
    assert output.shape == (1, n)
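
Note the layout here: x has shape (p, n), features by samples, so each column is one example and the Identity output layer produces one row of predictions. A minimal numpy sketch of the same shape flow, using the 4 -> 10 -> 11 -> 1 sizes from the test (the random initialisation below is an assumption for illustration, not NeuralNetwork's actual scheme):

import numpy as np

rng = np.random.default_rng(0)
n, p = 100, 4
x = np.ones((p, n))                       # columns are samples

sizes = [p, 10, 11, 1]                    # same stack as the test above
a = x
for fan_in, fan_out in zip(sizes, sizes[1:]):
    W = rng.standard_normal((fan_out, fan_in)) * 0.1  # assumed init
    b = np.zeros((fan_out, 1))
    a = W @ a + b                         # b broadcasts across columns
    if fan_out != 1:
        a = 1 / (1 + np.exp(-a))          # Sigmoid hidden layers
assert a.shape == (1, n)                  # Identity output: one row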
Example no. 2
    def test_numerical_gradient_checking(self):
        label, image = next(mnist.read())
        ninput = [pixel / 255 for row in image for pixel in row]
        expected = [1 if i == label else 0 for i in range(10)]
        nnet = NeuralNetwork([784, 16, 16, 10])

        epsilon = 1e-5
        numgrad = [np.empty(wmatrix.shape) for wmatrix in nnet.weight]

        for k, wmatrix in enumerate(nnet.weight):
            for i, w in np.ndenumerate(wmatrix):
                wmatrix[i] = w - epsilon
                nnet.feedforward(ninput)
                a = nnet.get_error(expected)
                wmatrix[i] = w + epsilon
                nnet.feedforward(ninput)
                b = nnet.get_error(expected)
                numgrad[k][i] = (b - a) / (2 * epsilon)  # central difference quotient
                wmatrix[i] = w
        error_gradient = nnet.get_error_gradient(expected)

        unit = lambda v: v / norm(v) if (v != 0).any() else np.zeros(v.shape)

        for k in range(len(nnet.weight)):
            ag = error_gradient[k]
            ng = numgrad[k]
            print(f"custom = {norm(unit(ag) - unit(ng))}")
            print(
                f"derived from cs231 = {norm(unit(ag) * norm(ng) - ng) / max(norm(ag), norm(ng))}"
            )
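
For reference, the quantity fixed above is the central-difference estimate g_i ≈ (E(w_i + ε) − E(w_i − ε)) / (2ε). A self-contained sketch of the same check on a toy loss with a known gradient (independent of the NeuralNetwork class used above):

import numpy as np

def loss(w):                          # toy loss, analytic gradient is 2*w
    return float(np.sum(w ** 2))

w = np.array([0.3, -1.2, 2.0])
eps = 1e-5
numgrad = np.empty_like(w)
for i in range(w.size):
    orig = w[i]
    w[i] = orig + eps
    b = loss(w)
    w[i] = orig - eps
    a = loss(w)
    w[i] = orig                       # always restore the weight
    numgrad[i] = (b - a) / (2 * eps)  # divide by 2*eps, don't multiply
assert np.allclose(numgrad, 2 * w, atol=1e-6)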
Example no. 3
    def test_get_random_params(self):
        dlayers = [1024, 32, 64, 47]
        nnet = NeuralNetwork(dlayers)
        params = nnet.get_random_params()

        weights = [n * m for n, m in zip(dlayers, dlayers[1:])]
        biases = dlayers[1:]
        self.assertEqual(len(params), sum(weights) + sum(biases))
        self.assertTrue(all(-GM <= p <= GM for p in params))
def test_backward():
    n = 100
    p = 4
    x = np.ones([p, n])
    y = np.zeros([1, n])
    net = NeuralNetwork(p, [(10, Sigmoid()), (11, Sigmoid()), (1, Identity())], QuadraticLoss())
    net(x)
    net.backward(y)
    assert True  # smoke test: forward and backward passes ran without raising
Example no. 5
def test_trained(params=None, head=100, tail=100):
    "Tests a network with params against first `head` and last `tail` examples"
    params = params if params is not None else load_params()
    nnet = NeuralNetwork(DLAYERS, params)
    mnist_db = list(mnist.read())
    print("[KNOWN]")
    test_and_report_against(nnet, mnist_db[:head])  # Training dataset
    print("[UNKNOWN]")
    test_and_report_against(nnet, mnist_db[-tail:])  # Unknown dataset
Example no. 6
    def test_create_layers(self):
        dlayers = [1024, 32, 64, 47]
        nnet = NeuralNetwork(dlayers)
        params = nnet.get_random_params()
        nnet.create_layers(params)

        weights = [(n + 1) * m for n, m in zip(dlayers, dlayers[1:])]

        # Weights assertions
        self.assertEqual(len(nnet.weight), len(dlayers) - 1)
        self.assertTrue(
            all(w.size == weights[i] for i, w in enumerate(nnet.weight)))
        self.assertTrue(
            all(w.shape == (dlayers[i - 1] + 1, dlayers[i])
                for i, w in enumerate(nnet.weight, 1)))
        self.assertTrue(
            all(-GM <= p <= GM for w in nnet.weight for p in np.nditer(w)))
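
The (n + 1) * m size per layer folds the bias into an extra input row, so it agrees with the separate weights-plus-biases count in test_get_random_params: n*m + m = (n + 1)*m. A quick check of the arithmetic for the dlayers used in these tests:

dlayers = [1024, 32, 64, 47]
total = sum((n + 1) * m for n, m in zip(dlayers, dlayers[1:]))
assert total == 32800 + 2112 + 3055 == 37967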
Example no. 7
def backpropagation_main():
    label, image = next(mnist.read())
    ninput = [pixel / 255 for row in image for pixel in row]
    expected = [1 if i == label else 0 for i in range(10)]

    nnet = NeuralNetwork(DLAYERS, params=None)
    # nnet = NeuralNetwork(DLAYERS, params=load_params())
    for i in range(1000000000000):  # effectively "until interrupted"
        try:
            guess = nnet.feedforward(ninput)
            cost = nnet.get_error(expected)
            print(f"[{i + 1}] cost = {cost}, guess = {guess}")
            nnet.backpropagate(expected)
        except KeyboardInterrupt:
            break
    guess = nnet.feedforward(ninput)
    cost = nnet.get_error(expected)
    print(f"[{i + 1}] cost = {cost}")
    save_params(nnet.params)
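
The expected list built at the top of this example is a one-hot target; with numpy the same vector can be produced by a single indexing step. A small equivalence for illustration (np.eye yields floats where the list comprehension yields ints):

import numpy as np

label = 7                     # e.g. the digit returned by mnist.read()
expected = np.eye(10)[label]  # array with a 1.0 at index 7, zeros elsewhere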
Example no. 8
                            noise_prob=pretrain_noise_prob,
                            seed=seed
                            )
    new_layer.train(pretraining_trainset)
    print(' DONE')

    pretrained_Ws += [new_layer.W]
    pretrained_bs += [new_layer.b]

pretrained_Ws += [np.zeros((sizes[-1], len(trainset.metadata['targets'])))]
pretrained_bs += [np.zeros((len(trainset.metadata['targets']),))]
    
# Construct neural network, with pretrained parameters
myObject = NeuralNetwork(n_epochs=1,
                         lr=lr,
                         dc=dc,
                         sizes=sizes,
                         seed=seed,
                         parameter_initialization=(pretrained_bs,pretrained_Ws))

print "Fine-tuning..."
# Early stopping code
best_val_error = np.inf
best_it = 0
str_header = 'best_it\t'
look_ahead = 5
n_incr_error = 0
for stage in range(1, 500 + 1):
    if n_incr_error >= look_ahead:
        break
    myObject.n_epochs = stage
    myObject.train(trainset)
Example no. 9
from nnet import NeuralNetwork

print "Verifying gradients with sigmoid activation"
m = NeuralNetwork()
m.L2 = 0
m.L1 = 0
m.tanh = False
m.verify_gradients()

print ""
print "Verifying gradients with tanh activation"
m.tanh = True
m.verify_gradients()

print ""
print "Verifying gradients with L2 regularization"
m.L2 = 0.001
m.verify_gradients()

print ""
print "Verifying gradients with L1 regularization"
m.L2 = 0
m.L1 = 0.001
m.verify_gradients()

Example no. 10
    def __init__(self, actions, weights):
        self.actions = actions
        # number of state variables in FlappyBird
        self.nn = NeuralNetwork([8, 16, 1], weights=weights)
Example no. 11
def createParent():
    nn = NeuralNetwork([8, 16, 1])
    return nn.weights
Example no. 12
        trial = adaboost.Adaboost(file_name, None)
        trial.training = file_name
        train_pixels = trial.prepare_data(trial.training)[0]
        weights = trial.train(train_pixels)
        weight_values = weights.values()
        for i in weights:
            weights[i] += 10

        with open(model_file, 'w') as myfile:
            for i in weights:
                if i == trial.learner1:
                    myfile.write('%s %.9f\n' % ('learner1', weights[i]))
                if i == trial.learner2:
                    myfile.write('%s %.9f\n' % ('learner2', weights[i]))
    elif model == 'nnet':
        nnet = NeuralNetwork()
        nnet.train(file_name, model_file, epochs=10000)
    elif model == 'best':
        best = Best()
        best.train(file_name, model_file, epochs=10000)
    else:
        print('Specified model not found!!')
else:
    if model == 'nearest' or model == 'nnet' or model == 'best':
        if model_file.endswith('.txt'):
            model_file = model_file + '.npy'

    if model == 'nearest':
        knn = Knn()
        knn.test(file_name, model_file)
    elif model == 'nnet':
Example no. 13
import numpy as np
import matplotlib.pyplot as plt
from nnet import NeuralNetwork
import activation_functions as funcs

config = [1, 5, 5, 1]
net = NeuralNetwork(config,
                    learning_rate=0.1,
                    act_func=funcs.sigmoid,
                    df_act_func=funcs.df_sigmoid)


def f(x):
    return x**2


vf = np.vectorize(f)

x = np.array(np.linspace(-1, 1, num=10), ndmin=2)
y = vf(x)

epochs = 5000
err = net.fit_raw(x, y, epochs, True, 10)

test_sample = np.array([0.1])
y_pred = net.predict(test_sample)

plt.plot(np.arange(1, epochs + 1), np.array(err))  # range(...) + 1 is a TypeError in Python 3
plt.show()
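
A side note on the helper above: np.vectorize is not needed for f(x) = x**2, since ** already operates elementwise on numpy arrays; the training targets could be computed directly:

y = x ** 2  # equivalent to np.vectorize(f)(x) for this f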
Example no. 14
    def train(self, file_name, model_file, epochs):
        nnet = NeuralNetwork()
        nnet.train(file_name, model_file, epochs)
Example no. 15
    def test(self, file_name, model_file, output_file="best_output.txt"):
        nnet = NeuralNetwork()
        nnet.test(file_name, model_file, output_file)
Example no. 16
def main(argv):    
    if args.seed:
        np.random.seed(args.seed)

    map = Map(MAP_WIDTH, MAP_HEIGHT)
    net = NeuralNetwork(2, args.layer_neurons, 1, args.hidden_layers, args.bias)
    print(net)
    if args.train:
        # training data
        train_d0, train_d1 = map.dataset(0, MAP_WIDTH + MAP_HEIGHT), \
                             map.dataset(1, MAP_WIDTH + MAP_HEIGHT)
        td0 = np.array([[0]] * train_d0.shape[0], dtype=float)
        td1 = np.array([[1]] * train_d1.shape[0], dtype=float)
        t = np.concatenate((td0, td1), axis=0)  # already normalized
        # input
        x = np.concatenate((train_d0, train_d1), axis=0)
        x_normalized = x / np.amax(x, axis=0)

        print('Training...')
        if args.logging:
            with open('training.log', 'w') as f:
                for epoch in range(args.epochs):
                    f.write('Epoch {}\n'.format(epoch))
                    f.write("Input:\n{}\n".format(x_normalized.T))
                    f.write("Actual Output:\n{}\n".format(t.T))
                    f.write("Predicted Output:\n{}\n".format(np.round(net.forward(x_normalized).T)))
                    f.write("Loss:\n{}\n\n".format(str(np.mean(np.square(t - net.forward(x_normalized))))))
                    net.train(x_normalized, t)
        else:
            for epoch in range(args.epochs):
                net.train(x_normalized, t, args.alpha, args.train_speed)
        print("Saving weights...")
        net.save_weights(W_PREFIX)
        print('Done.')
    else:
        train_d0 = train_d1 = np.array([])
        if os.path.exists('{}_0.w.txt'.format(W_PREFIX)):
            print("Loading weights...")
            net.load_weights(W_PREFIX)
            print('Done.')
        else:
            print("No weights were found!")

    if args.seed:
        np.random.seed(args.seed + 1)
    
    # input
    zds0, zds1 = np.random.randint(2, 20), np.random.randint(2, 20)
    d0, d1 = map.dataset(0, zds0), map.dataset(1, zds1)
    x = np.concatenate((d0, d1), axis=0)
    x_normalized = x / np.amax(x, axis=0)
    # expected data for checking
    td0 = np.array([[0]] * d0.shape[0], dtype=float)
    td1 = np.array([[1]] * d1.shape[0], dtype=float)
    t = np.concatenate((td0, td1), axis=0)  # already normalized
    # output
    y = np.round(net.predict(x_normalized))
    if args.verbose:
        print("Input:")
        print(x)
        print("Output (Expected):")
        print(t)
        print("Output (Actual):")
        print(y)

    res = (y == t)
    if res.all():
        print("\nAll Good!")
    else:
        print("{}% are good!".format(res.sum() * 100 // len(res)))

    if args.plotting:
        # filter 'hits' and 'misses'
        good = []
        bad = []
        for i, v in enumerate(res):
            if v:
                good.append(x[i])
            else:
                bad.append(x[i])
        map.plot(np.array(good), np.array(bad), train_d0, train_d1, args.plot_name)
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from nnet import NeuralNetwork
import activation_functions as funcs

np.random.seed(1)


def remap(value, low1, high1, low2, high2):
    return low2 + (value - low1) * (high2 - low2) / (high1 - low1)
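
# Quick sanity check (not in the original script): remap is plain linear
# interpolation between ranges, so the midpoint of [0, 10] maps to the
# midpoint of [0, 100].
assert remap(5, 0, 10, 0, 100) == 50.0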


config = [2, 20, 15, 20, 1]
network = NeuralNetwork(config, learning_rate=0.00005, act_func=funcs.tanh, df_act_func=funcs.df_illogical)

Xn = 30
Yn = 30

X = np.linspace(-0.9, 0.9, Xn).reshape(-1, 1)
Y = np.linspace(-0.9, 0.9, Yn).reshape(-1, 1)


def f(x, y):
    return np.sin(3 * x) * np.cos(2 * y) * 0.5


X, Y = np.meshgrid(X, Y)
XY_train = np.stack((X.ravel(), Y.ravel()), axis=-1)

res = f(X, Y)