Example #1
    def test_forward_pass(self):
        network = NeuralNetwork(3, 2, 1, 0.5)
        final_out, hidden_out = network.forward_pass_train(inputs)
        self.assertIsNotNone(final_out)
        self.assertIsNotNone(hidden_out)
        self.assertEqual((1, 2), np.shape(hidden_out))
        self.assertEqual((1, 1), np.shape(final_out))
Example #2
    def test_run(self):
        # Test correctness of run method
        network = NeuralNetwork(3, 2, 1, 0.5)
        network.weights_input_to_hidden = test_w_i_h.copy()
        network.weights_hidden_to_output = test_w_h_o.copy()

        self.assertTrue(np.allclose(network.run(inputs), 0.09998924))
Example #3
    def test_backprop(self):
        network = NeuralNetwork(3, 4, 1, 0.5)
        final_out, hidden_out = network.forward_pass_train(inputs)
        deltawOut = np.zeros(np.shape(network.weights_hidden_to_output))
        # self.assertEqual(np.shape(deltawOut), np.shape(test_w_h_o))
        deltawHidden = np.zeros(np.shape(network.weights_input_to_hidden))
        # self.assertEqual(np.shape(deltawHidden), np.shape(test_w_i_h))
        deltaWIH, deltaWHO = network.backpropagation(
            final_out, hidden_out, inputs, targets, deltawHidden, deltawOut)
        self.assertIsNotNone(deltaWIH)
        self.assertIsNotNone(deltaWHO)
Example #4
def get_loss(params):
    iterations, learning_rate, hidden_nodes = params
    N_i = train_features.shape[1]
    network = NeuralNetwork(N_i, hidden_nodes, 1, learning_rate)

    for ii in range(iterations):
        batch = np.random.choice(train_features.index, size=128)
        X, y = train_features.loc[batch].values, train_targets.loc[batch, 'cnt']

        network.train(X, y)

    return MSE(network.run(val_features).T, val_targets['cnt'].values)
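
# A hypothetical use of get_loss (not part of the original snippet): a small
# grid search over the (iterations, learning_rate, hidden_nodes) tuple it
# expects; the candidate values below are illustrative, not tuned.
candidates = [(1000, 0.5, 8), (3000, 0.5, 12), (3000, 1.0, 20)]
results = {params: get_loss(params) for params in candidates}
best_params = min(results, key=results.get)
print('best (iterations, lr, hidden_nodes):', best_params,
      'validation MSE:', results[best_params])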
Example #5
def train():
    iterations = FLAGS.iterations or 3000
    learning_rate = FLAGS.learning_rate or 1
    hidden_nodes = FLAGS.hidden_nodes or 12
    output_nodes = 1

    N_i = train_features.shape[1]
    network = NeuralNetwork(N_i, hidden_nodes, output_nodes, learning_rate)

    losses = {'train': [], 'validation': []}
    print('hyperparameters:', FLAGS)
    for ii in range(iterations):
        iteration(network, losses, ii)
Example #6
    def test_train(self):
        # Test that weights are updated correctly on training
        network = NeuralNetwork(3, 2, 1, 0.5)
        network.weights_input_to_hidden = test_w_i_h.copy()
        network.weights_hidden_to_output = test_w_h_o.copy()

        network.train(inputs, targets)
        self.assertTrue(np.allclose(network.weights_hidden_to_output,
                                    np.array([[0.37275328], [-0.03172939]])))
        self.assertTrue(np.allclose(network.weights_input_to_hidden,
                                    np.array([[0.10562014, -0.20185996],
                                              [0.39775194, 0.50074398],
                                              [-0.29887597, 0.19962801]])))
Example #7
    def test_train(self):
        # Test that weights are updated correctly on training
        network = NeuralNetwork(3, 2, 1, 0.5)
        network.weights_input_to_hidden = test_w_i_h.copy()
        network.weights_hidden_to_output = test_w_h_o.copy()

        network.train(inputs, targets)

        self.assertTrue(
            np.allclose(network.weights_hidden_to_output,
                        np.array([[0.41434416], [0.00729889], [0.32494378]])))
        self.assertTrue(
            np.allclose(
                network.weights_input_to_hidden,
                np.array([[0.10883301, -0.20292324, 0.30587281],
                          [0.3964668, 0.5011693, 0.29765088],
                          [-0.2982334, 0.19941535, 0.30117456]])))
Example #8
    def test_activation(self):
        network = NeuralNetwork(3, 2, 1, 0.5)
        # Test that the activation function is a sigmoid
        self.assertTrue(np.all(network.activation_function(0.5) == 1 / (1 + np.exp(-0.5))))
Example #9
def runTests():
	import numpy as np
	import pandas as pd
	import matplotlib.pyplot as plt
	data_path = 'Bike-Sharing-Dataset/hour.csv'

	rides = pd.read_csv(data_path)
	rides[:24*10].plot(x='dteday', y='cnt')
	dummy_fields = ['season', 'weathersit', 'mnth', 'hr', 'weekday']
	for each in dummy_fields:
	    dummies = pd.get_dummies(rides[each], prefix=each, drop_first=False)
	    rides = pd.concat([rides, dummies], axis=1)

	fields_to_drop = ['instant', 'dteday', 'season', 'weathersit', 
	                  'weekday', 'atemp', 'mnth', 'workingday', 'hr']

	data = rides.drop(fields_to_drop, axis=1)
	quant_features = ['casual', 'registered', 'cnt', 'temp', 'hum', 'windspeed']
	# Store scalings in a dictionary so we can convert back later
	scaled_features = {}
	for each in quant_features:
	    mean, std = data[each].mean(), data[each].std()
	    scaled_features[each] = [mean, std]
	    data.loc[:, each] = (data[each] - mean)/std

	# Save data for approximately the last 21 days 
	test_data = data[-21*24:]

	# Now remove the test data from the data set 
	data = data[:-21*24]

	# Separate the data into features and targets
	target_fields = ['cnt', 'casual', 'registered']
	features, targets = data.drop(target_fields, axis=1), data[target_fields]
	test_features, test_targets = test_data.drop(target_fields, axis=1), test_data[target_fields]
	# Hold out the last 60 days or so of the remaining data as a validation set
	train_features, train_targets = features[:-60*24], targets[:-60*24]
	val_features, val_targets = features[-60*24:], targets[-60*24:]
	from my_answers import NeuralNetwork
	def MSE(y, Y):
	    return np.mean((y-Y)**2)

	inputs = np.array([[0.5, -0.2, 0.1]])
	targets = np.array([[0.4]])
	test_w_i_h = np.array([[0.1, -0.2],
	                       [0.4, 0.5],
	                       [-0.3, 0.2]])

	test_w_h_o = np.array([[0.3],
	                       [-0.1]])

	network = NeuralNetwork(3, 2, 1, 0.5)
	network.weights_input_to_hidden = test_w_i_h.copy()
	network.weights_hidden_to_output = test_w_h_o.copy()


	network.train(inputs, targets)

	network = NeuralNetwork(3, 2, 1, 0.5)
	network.weights_input_to_hidden = test_w_i_h.copy()
	network.weights_hidden_to_output = test_w_h_o.copy()


	network.run(inputs)
Example #10
# In a model whose weights are fully optimized, more hidden nodes can only make
# the predictions more accurate, since any redundant node can simply be given
# zero weights. However, the more hidden nodes you have, the harder the weights
# are to optimize, and the more likely suboptimal weights are to lead to
# overfitting: the model memorizes the training data instead of learning the
# true pattern, and won't generalize well to unseen data.
#
# Try a few different numbers and see how they affect performance. You can look
# at the losses dictionary for a metric of network performance. If the number
# of hidden units is too low, the model won't have enough capacity to learn; if
# it is too high, there are too many directions the learning can take. The
# trick is to find the right balance. You'll generally find that the best
# number of hidden nodes ends up between the number of input and output nodes;
# a sketch of such a sweep follows below.
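
# A minimal sketch of such a sweep (assuming the NeuralNetwork class, the MSE
# helper, and the train/validation splits defined above; the sizes, batch
# settings, and iteration count here are illustrative):
for hidden_nodes in [4, 8, 12, 20]:
    network = NeuralNetwork(train_features.shape[1], hidden_nodes, 1, 0.5)
    for _ in range(1000):
        batch = np.random.choice(train_features.index, size=128)
        network.train(train_features.loc[batch].values,
                      train_targets.loc[batch, 'cnt'])
    val_loss = MSE(network.run(val_features).T, val_targets['cnt'].values)
    print(hidden_nodes, 'hidden nodes -> validation MSE:', val_loss)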


import sys

####################
### Set the hyperparameters in your my_answers.py file ###
####################

from my_answers import iterations, learning_rate, hidden_nodes, output_nodes

N_i = train_features.shape[1]
network = NeuralNetwork(N_i, hidden_nodes, output_nodes, learning_rate)

losses = {'train': [], 'validation': []}
for ii in range(iterations):
    # Go through a random batch of 128 records from the training data set
    batch = np.random.choice(train_features.index, size=128)
    X, y = train_features.loc[batch].values, train_targets.loc[batch, 'cnt']

    network.train(X, y)

    # Printing out the training progress
    train_loss = MSE(
        network.run(train_features).T, train_targets['cnt'].values)
    val_loss = MSE(network.run(val_features).T, val_targets['cnt'].values)
    sys.stdout.write("\rProgress: {:2.1f}".format(100 * ii / float(iterations)) +
                     "% ... Training loss: " + str(train_loss)[:5] +
                     " ... Validation loss: " + str(val_loss)[:5])
    sys.stdout.flush()

    losses['train'].append(train_loss)
    losses['validation'].append(val_loss)
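
# To visualize how training went, a minimal sketch plotting the recorded
# losses (assumes matplotlib, imported as in Example #9):
import matplotlib.pyplot as plt

plt.plot(losses['train'], label='Training loss')
plt.plot(losses['validation'], label='Validation loss')
plt.xlabel('Iteration')
plt.ylabel('MSE')
plt.legend()
plt.show()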
Example #11
import numpy as np
from my_answers import NeuralNetwork

inputs = np.array([[0.5, -0.2, 0.1]])
targets = np.array([[0.4]])
test_w_i_h = np.array([[0.1, -0.2], [0.4, 0.5], [-0.3, 0.2]])
test_w_h_o = np.array([[0.3], [-0.1]])

network = NeuralNetwork(3, 2, 1, 0.5)

network.weights_input_to_hidden = test_w_i_h.copy()
network.weights_hidden_to_output = test_w_h_o.copy()

network.train(inputs, targets)
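
# Sanity check (a sketch, not part of the original snippet): with the
# reference weights restored, run() should reproduce the value asserted in
# the unit test in Example #2 (~0.09998924).
network.weights_input_to_hidden = test_w_i_h.copy()
network.weights_hidden_to_output = test_w_h_o.copy()
print(network.run(inputs))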

# -----------------------------

import unittest

inputs = np.array([[0.5, -0.2, 0.1]])
targets = np.array([[0.4]])
test_w_i_h = np.array([[0.1, -0.2], [0.4, 0.5], [-0.3, 0.2]])
test_w_h_o = np.array([[0.3], [-0.1]])


class TestMethods(unittest.TestCase):

    ##########
    # Unit tests for data loading
    ##########
Example #12
y = np.array(data['Attrition'])

from sklearn.model_selection import train_test_split

X_train, test_features, y_train, test_targets = train_test_split(X, y, test_size=0.2)

train_features, val_features, train_targets, val_targets = train_test_split(X_train, y_train, test_size=0.2)

print(train_features.shape)


import sys

from my_answers import iterations, learning_rate, hidden_nodes, output_nodes


N_i = train_features.shape[1]
network = NeuralNetwork(N_i, hidden_nodes, output_nodes, learning_rate)

losses = {'train':[], 'validation':[]}
for ii in range(iterations):
    # Go through a random batch of 128 records from the training data set
    #batch = np.random.choice(train_features, size=128)
    X, y = train_features, train_targets
                             
    network.train(X, y)
    
    # Printing out the training progress
    train_loss = MSE(network.run(train_features).T, train_targets)
    val_loss = MSE(network.run(val_features).T, val_targets)
    sys.stdout.write("\rProgress: {:2.1f}".format(100 * ii/float(iterations)) \
                     + "% ... Training loss: " + str(train_loss)[:5] \
                     + " ... Validation loss: " + str(val_loss)[:5])
Example #13
import sys
import torch
import torch.nn as nn
import numpy as np

from my_answers import NeuralNetwork

####################
### Set the hyperparameters in your my_answers.py file ###
####################

from my_answers import iterations, learning_rate, hidden_nodes, output_nodes

N_i = train_features.shape[1]
network = NeuralNetwork(N_i, hidden_nodes, output_nodes, learning_rate)
print(network)

from torch import optim
# specify loss
criterion = nn.MSELoss()
# specify optimizer
optimizer = optim.SGD(network.parameters(), lr=learning_rate)
# optimizer = optim.Adam(network.parameters(), lr=learning_rate)

losses = {'train':[], 'validation':[]}
last_lr = learning_rate
for ii in range(iterations):      

    # varying learning rate for SGD
    if ii == 1500: