Example #1
    def test_train(self):
        # Test that weights are updated correctly on training
        network = NeuralNetwork(3, 2, 1, 0.5)
        network.weights_input_to_hidden = test_w_i_h.copy()
        network.weights_hidden_to_output = test_w_h_o.copy()

        network.train(inputs, targets)
        self.assertTrue(np.allclose(network.weights_hidden_to_output,
                                    np.array([[ 0.37275328],
                                              [-0.03172939]])))
        self.assertTrue(np.allclose(network.weights_input_to_hidden,
                                    np.array([[ 0.10562014, -0.20185996],
                                              [ 0.39775194,  0.50074398],
                                              [-0.29887597,  0.19962801]])))
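The expected matrices above correspond to a single training step on the shared test fixtures (inputs = [[0.5, -0.2, 0.1]], targets = [[0.4]], and the 3x2 / 2x1 test weights shown further down in this listing). A minimal NumPy sketch of that step, assuming a sigmoid hidden layer and an identity output unit with the 0.5 learning rate passed to the constructor (the actual my_answers.py implementation is not shown here):

import numpy as np

def sigmoid(x):
    return 1 / (1 + np.exp(-x))

X = np.array([[0.5, -0.2, 0.1]])   # inputs
y = np.array([[0.4]])              # targets
w_i_h = np.array([[0.1, -0.2], [0.4, 0.5], [-0.3, 0.2]])
w_h_o = np.array([[0.3], [-0.1]])
lr = 0.5

# forward pass: sigmoid hidden layer, identity output unit
hidden_out = sigmoid(X.dot(w_i_h))          # shape (1, 2)
final_out = hidden_out.dot(w_h_o)           # shape (1, 1)

# backward pass
output_error = y - final_out                # identity output, so delta == error
hidden_error = output_error.dot(w_h_o.T)
hidden_delta = hidden_error * hidden_out * (1 - hidden_out)

# weight updates for a single record
w_h_o += lr * hidden_out.T.dot(output_error)
w_i_h += lr * X.T.dot(hidden_delta)

print(w_h_o)   # ~ [[ 0.37275328], [-0.03172939]]
print(w_i_h)   # ~ [[ 0.10562014, -0.20185996], ...]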
Example #2
def get_loss(params):
    iterations, learning_rate, hidden_nodes = params
    N_i = train_features.shape[1]
    network = NeuralNetwork(N_i, hidden_nodes, 1, learning_rate)
    loss_values = []

    for ii in range(iterations + 1):
        # train on a random batch of 128 records
        batch = np.random.choice(train_features.index, size=128)
        X, y = train_features.loc[batch].values, train_targets.loc[batch]['cnt']

        network.train(X, y)
        # record the validation MSE every 100 iterations
        if ii % 100 == 0 and ii > 0:
            loss_values.append(
                MSE(network.run(val_features).T, val_targets['cnt'].values))

    return loss_values
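
One way get_loss might be used is to compare a few candidate settings by the validation MSE it records every 100 iterations; the parameter values below are purely illustrative:

# each tuple is (iterations, learning_rate, hidden_nodes); values are illustrative only
candidates = [(2000, 0.5, 10), (2000, 0.8, 15), (3000, 0.3, 20)]
for params in candidates:
    loss_values = get_loss(params)
    print(params, "-> final validation MSE:", loss_values[-1])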
Example #3
    def test_train(self):
        # Test that weights are updated correctly on training
        network = NeuralNetwork(3, 2, 1, 0.5)
        network.weights_input_to_hidden = test_w_i_h.copy()
        network.weights_hidden_to_output = test_w_h_o.copy()

        network.train(inputs, targets)

        self.assertTrue(
            np.allclose(network.weights_hidden_to_output,
                        np.array([[0.41434416], [0.00729889], [0.32494378]])))
        self.assertTrue(
            np.allclose(
                network.weights_input_to_hidden,
                np.array([[0.10883301, -0.20292324, 0.30587281],
                          [0.3964668, 0.5011693, 0.29765088],
                          [-0.2982334, 0.19941535, 0.30117456]])))
Example #4
def runTests():
    import numpy as np
    import pandas as pd
    import matplotlib.pyplot as plt

    data_path = 'Bike-Sharing-Dataset/hour.csv'

    rides = pd.read_csv(data_path)
    rides[:24*10].plot(x='dteday', y='cnt')

    # One-hot encode the categorical fields
    dummy_fields = ['season', 'weathersit', 'mnth', 'hr', 'weekday']
    for each in dummy_fields:
        dummies = pd.get_dummies(rides[each], prefix=each, drop_first=False)
        rides = pd.concat([rides, dummies], axis=1)

    fields_to_drop = ['instant', 'dteday', 'season', 'weathersit',
                      'weekday', 'atemp', 'mnth', 'workingday', 'hr']
    data = rides.drop(fields_to_drop, axis=1)

    # Standardize the quantitative features
    quant_features = ['casual', 'registered', 'cnt', 'temp', 'hum', 'windspeed']
    # Store scalings in a dictionary so we can convert back later
    scaled_features = {}
    for each in quant_features:
        mean, std = data[each].mean(), data[each].std()
        scaled_features[each] = [mean, std]
        data.loc[:, each] = (data[each] - mean)/std

    # Save data for approximately the last 21 days
    test_data = data[-21*24:]

    # Now remove the test data from the data set
    data = data[:-21*24]

    # Separate the data into features and targets
    target_fields = ['cnt', 'casual', 'registered']
    features, targets = data.drop(target_fields, axis=1), data[target_fields]
    test_features, test_targets = test_data.drop(target_fields, axis=1), test_data[target_fields]

    # Hold out the last 60 days or so of the remaining data as a validation set
    train_features, train_targets = features[:-60*24], targets[:-60*24]
    val_features, val_targets = features[-60*24:], targets[-60*24:]

    from my_answers import NeuralNetwork

    def MSE(y, Y):
        return np.mean((y-Y)**2)

    # Fixed fixtures for the unit tests
    inputs = np.array([[0.5, -0.2, 0.1]])
    targets = np.array([[0.4]])
    test_w_i_h = np.array([[0.1, -0.2],
                           [0.4, 0.5],
                           [-0.3, 0.2]])
    test_w_h_o = np.array([[0.3],
                           [-0.1]])

    network = NeuralNetwork(3, 2, 1, 0.5)
    network.weights_input_to_hidden = test_w_i_h.copy()
    network.weights_hidden_to_output = test_w_h_o.copy()

    network.train(inputs, targets)

    network = NeuralNetwork(3, 2, 1, 0.5)
    network.weights_input_to_hidden = test_w_i_h.copy()
    network.weights_hidden_to_output = test_w_h_o.copy()

    network.run(inputs)
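
Because the mean and standard deviation of every quantitative field are kept in scaled_features, predictions can be mapped back to the original units once a network has actually been trained. A minimal sketch of that inverse transform for the 'cnt' target (reusing the scaled_features, test_features and test_targets names from above):

# undo the standardization of 'cnt' to get ride counts back
mean, std = scaled_features['cnt']
predictions = network.run(test_features).T * std + mean
actual = test_targets['cnt'].values * std + mean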
####################
### Set the hyperparameters in your my_answers.py file ###
####################

import sys

from my_answers import iterations, learning_rate, hidden_nodes, output_nodes

N_i = train_features.shape[1]
network = NeuralNetwork(N_i, hidden_nodes, output_nodes, learning_rate)

losses = {'train': [], 'validation': []}
for ii in range(iterations):
    # Go through a random batch of 128 records from the training data set
    batch = np.random.choice(train_features.index, size=128)
    X, y = train_features.loc[batch].values, train_targets.loc[batch]['cnt']

    network.train(X, y)

    # Printing out the training progress
    train_loss = MSE(
        network.run(train_features).T, train_targets['cnt'].values)
    val_loss = MSE(network.run(val_features).T, val_targets['cnt'].values)
    sys.stdout.write("\rProgress: {:2.1f}".format(100 * ii /
                                                  float(iterations)) +
                     "% ... Training loss: " + str(train_loss)[:5] +
                     " ... Validation loss: " + str(val_loss)[:5])
    sys.stdout.flush()

    losses['train'].append(train_loss)
    losses['validation'].append(val_loss)
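
After the loop, losses holds one training and one validation MSE per iteration, so a quick plot is the usual way to check convergence and overfitting. A minimal sketch, assuming matplotlib.pyplot is available:

import matplotlib.pyplot as plt

plt.plot(losses['train'], label='Training loss')
plt.plot(losses['validation'], label='Validation loss')
plt.legend()
plt.show()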

Example #6
import numpy as np
from my_answers import NeuralNetwork

inputs = np.array([[0.5, -0.2, 0.1]])
targets = np.array([[0.4]])
test_w_i_h = np.array([[0.1, -0.2], [0.4, 0.5], [-0.3, 0.2]])
test_w_h_o = np.array([[0.3], [-0.1]])

network = NeuralNetwork(3, 2, 1, 0.5)

network.weights_input_to_hidden = test_w_i_h.copy()
network.weights_hidden_to_output = test_w_h_o.copy()

network.train(inputs, targets)

# -----------------------------

import unittest

inputs = np.array([[0.5, -0.2, 0.1]])
targets = np.array([[0.4]])
test_w_i_h = np.array([[0.1, -0.2], [0.4, 0.5], [-0.3, 0.2]])
test_w_h_o = np.array([[0.3], [-0.1]])


class TestMethods(unittest.TestCase):

    ##########
    # Unit tests for data loading
    ##########
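
The test class is truncated here (only the banner for the data-loading tests survives). For completeness, one way such a suite is commonly run, for example from inside a notebook where unittest.main() is awkward, is with a loader and a text runner:

suite = unittest.TestLoader().loadTestsFromTestCase(TestMethods)
unittest.TextTestRunner().run(suite)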
Example #7
            optimizer = optim.SGD(network.parameters(), lr=learning_rate)
            last_lr = learning_rate
    if ii == 5000:
        if isinstance(optimizer, optim.SGD):
            learning_rate = 0.05
            print("\n  reducing learning rate from", last_lr, "to", learning_rate)
            optimizer = optim.SGD(network.parameters(), lr=learning_rate)

    # Go through a random batch of 128 records from the training data set
    batch_idx = np.random.choice(train_features.index, size=128)
    data, targets = train_features.loc[batch_idx].values, train_targets.loc[batch_idx]['cnt']

    ###################
    # train the model #
    ###################
    network.train()
    train_loss = 0

    # clear the gradients of all optimized variables
    optimizer.zero_grad()
    # forward pass: compute predicted outputs by passing inputs to the model
    data = data.astype(np.float32)
    output = network(torch.from_numpy(data))

    # calculate the loss
    targets = targets.astype(np.float32)
    targets_tensor = torch.from_numpy(targets.values)
    loss = criterion(output.squeeze(), targets_tensor)
    train_loss = loss.item()

    # backward pass: compute gradient of the loss with respect to model parameters
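
The snippet cuts off at the backward-pass comment. In a standard PyTorch training loop the lines that would follow are simply the backward call and the optimizer step; this continuation is a guess at the missing code, not the original:

    loss.backward()       # compute gradients of the loss w.r.t. the parameters
    optimizer.step()      # update the parameters using the computed gradients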