def test_layer_sizes(self):
    """layer_sizes() must echo exactly the sizes the factory was given."""
    for expected in ([8, 24, 15], [2, 3, 18, 8]):
        net = NetFactory.create_neural_net(sizes=expected)
        self.assertEqual(net.layer_sizes(), expected)
def test_create_neural_net(self):
    """The factory returns a NeuralNet wired with the requested sizes."""
    net = NetFactory.create_neural_net(
        sizes=[3, 2, 5, 4],
        hidden_layer_activation=activation_functions.Rectifier,
        output_layer_activation=activation_functions.Softmax)
    self.assertIsInstance(net, NeuralNet)
    self.assertEqual(net.layer_sizes(), [3, 2, 5, 4])
def setUp(self):
    """Build a tiny 2-3-3 net plus a single-example data source."""
    features = np.array([5, 2], float)
    targets = np.array([0.25, 0, 1], float)
    self.examples = PreloadSource(([features], [targets]))
    self.nnet = NetFactory.create_neural_net(sizes=[2, 3, 3])
    self.Descent = GradientDescent
    self.cost_function = QuadraticCost(self.nnet)
def test_with_default_activations(self):
    """Without explicit activation choices every layer defaults to Sigmoid."""
    net = NetFactory.create_neural_net(sizes=[3, 2, 5, 4])
    hidden_layers = [net.layers()[i] for i in range(2)]
    for layer in hidden_layers:
        self.assertEqual(layer.get_activation(), activation_functions.Sigmoid)
    self.assertEqual(net.layers()[-1].get_activation(),
                     activation_functions.Sigmoid)
def test_json_structure_is_correct(self):
    """The saved file must contain layer_sizes plus a list of per-layer params."""
    net = NetFactory.create_neural_net(sizes=[2, 1, 2])
    net.save(self.dest_fname)
    with open(self.dest_fname, 'r') as f:
        net_params = json.loads(f.read())
    for key in ('layer_sizes', 'layers'):
        self.assertIn(key, net_params)
    self.assertIsInstance(net_params['layers'], list)
    first_layer = net_params['layers'][0]
    self.assertIn('weights', first_layer)
    self.assertIn('biases', first_layer)
def test_feed_into_with_rectified_unit(self):
    """A rectified output layer clamps negative pre-activations to zero."""
    net = NetFactory.create_neural_net(sizes=[3, 1, 2])
    net.layers()[-1].set_activation(Rectifier)
    output_layer = net.layers()[1]
    output_layer.set_biases(np.array([-1, 2], float))
    output_layer.set_weights(np.array([[0.1], [0]], float))
    activations, _ = net.feed_into_layer(np.array([3], float), layer=1)
    # z = [0.1*3 - 1, 0*3 + 2] = [-0.7, 2]; rectifier -> [0, 2]
    self.assertEqual(activations[0], 0)
    self.assertEqual(activations[1], 2)
def test_get_cost_initial(self):
    """A fresh 1-1-1 net has a known, hand-computable quadratic cost."""
    net = NetFactory.create_neural_net(sizes=[1, 1, 1])
    inputs = [np.array([-10], float), np.array([100], float)]
    targets = [np.array([0.5], float), np.array([0.75], float)]
    cost_func = cost_functions.QuadraticCost(net)
    cost = cost_func.get_cost(PreloadSource((inputs, targets)))
    # expected value 1/64 -- presumably from per-example errors 0 and 0.25
    self.assertAlmostEqual(cost, 1.0 / 64, places=4)
def test_json_is_valid(self):
    """save() must emit parseable JSON.

    Fixes two defects in the original: a bare ``except:`` (which would
    also swallow SystemExit/KeyboardInterrupt and any unrelated error),
    and the awkward ``assertTrue(False, msg)`` idiom.
    """
    net = NetFactory.create_neural_net(sizes=[2, 1, 2])
    net.randomize_parameters()
    net.save(self.dest_fname)
    with open(self.dest_fname, 'r') as f:
        s = f.read()
    try:
        json.loads(s)
    except ValueError:
        # json.JSONDecodeError is a subclass of ValueError, so this
        # catches exactly "the text is not valid JSON" and nothing else.
        self.fail('Invalid json')
def setUp(self):
    """Prepare a randomized 2-3-3 net and a gradient-descent trainer."""
    features = np.array([5, 2], float)
    targets = np.array([0.25, 0, 1], float)
    self.examples = PreloadSource(([features], [targets]))
    net = NetFactory.create_neural_net(sizes=[2, 3, 3])
    net.randomize_parameters()
    self.nnet = net
    self.grad_descent = GradientDescent(neural_net=net,
                                        cost_function=QuadraticCost(net))
def _create_network(self):
    """Size the net from the (transformed) image dimensions and the config."""
    image_width, image_height = self._transform_examples()
    # 10 outputs -- presumably one per digit class; confirm against dataset
    layer_sizes = ([image_height * image_width]
                   + self._config['hidden_layer_sizes']
                   + [10])
    self._neural_net = NetFactory.create_neural_net(
        sizes=layer_sizes,
        hidden_layer_activation=self._config['hidden_activation'],
        output_layer_activation=self._config['output_activation'])
def test_create_neural_net_with_rectifier_and_softmax_activations(self):
    """Hidden layers get Rectifier, the final layer gets Softmax."""
    net = NetFactory.create_neural_net(
        sizes=[3, 2, 5, 4],
        hidden_layer_activation=activation_functions.Rectifier,
        output_layer_activation=activation_functions.Softmax)
    for layer in [net.layers()[i] for i in range(2)]:
        self.assertEqual(layer.get_activation(),
                         activation_functions.Rectifier)
    self.assertEqual(net.layers()[-1].get_activation(),
                     activation_functions.Softmax)
def test_gives_correct_output_on_training_data(self):
    """After enough epochs the net memorizes a two-example training set."""
    net = NetFactory.create_neural_net(sizes=[1, 1, 1])
    trainer = GradientDescent(neural_net=net,
                              cost_function=QuadraticCost(neural_net=net))
    inputs = [np.array([-10], float), np.array([100], float)]
    targets = [np.array([0.5], float), np.array([0.75], float)]
    trainer.train(data_src=PreloadSource((inputs, targets)), nepochs=100)
    for x, y in zip(inputs, targets):
        self.assertAlmostEqual(net.feed(x)[0], y[0], places=1)
def test_costs_match(self):
    """Cost computed before saving equals cost computed after reloading.

    BUG FIX: the original reused the cost function that was bound to the
    pre-save network object, so ``c2`` was computed on the old network and
    the save/load round-trip was never actually exercised -- the test
    passed trivially. A fresh cost function is now bound to the reloaded
    network so the comparison is meaningful.
    """
    nnet = NetFactory.create_neural_net(sizes=[2, 3, 1, 10, 2])
    nnet.randomize_parameters()
    X = [np.array([90, 23], float), np.array([0, 2], float)]
    Y = [np.array([0.4, 0.6], float), np.array([0.3, 0])]
    examples = PreloadSource((X, Y))
    c1 = CrossEntropyCost(nnet).get_cost(examples)
    fname = os.path.join('test_temp', 'nets_params.json')
    nnet.save(dest_fname=fname)
    nnet = NeuralNet.create_from_file(fname)
    c2 = CrossEntropyCost(nnet).get_cost(examples)
    self.assertAlmostEqual(c1, c2, places=4)
def test_correct_parameters(self):
    """Weights and biases in the JSON mirror the in-memory network."""
    net = NetFactory.create_neural_net(sizes=[2, 1, 2])
    net.randomize_parameters()
    net.save(self.dest_fname)
    with open(self.dest_fname, 'r') as f:
        net_params = json.loads(f.read())
    self.assertEqual(net_params['layer_sizes'], [2, 1, 2])
    for index in (0, 1):
        saved_layer = net_params['layers'][index]
        self.assertEqual(saved_layer['weights'],
                         net.weights()[index].tolist())
        self.assertEqual(saved_layer['biases'],
                         net.biases()[index].tolist())
def test_back_propagation_slow(self):
    """Numerical gradients of a fresh 1-1-1 net match hand-computed values."""
    net = NetFactory.create_neural_net(sizes=[1, 1, 1])
    cost_func = QuadraticCost(neural_net=net)
    examples = ([np.array([5], float)], [np.array([0.25], float)])
    numerical = NumericalCalculator(data_src=PreloadSource(examples),
                                    neural_net=net,
                                    cost_function=cost_func)
    w_grad, b_grad = numerical.compute_gradients()
    # expected values 1/32 and 1/16 -- presumably derived by hand for the
    # zero-initialized net; confirm against the cost-function definition
    self.compare_grads(w_grad, [np.array([[0]], float),
                                np.array([[1 / 32]], float)])
    self.compare_grads(b_grad, [np.array([[0]], float),
                                np.array([[1 / 16]], float)])
def test_compute_gradients_with_cross_entropy_cost(self):
    """Backprop and numerical differentiation agree under cross-entropy."""
    net = NetFactory.create_neural_net(sizes=[4, 2, 10])
    net.randomize_parameters()
    cost_func = cost_functions.CrossEntropyCost(neural_net=net)
    examples = helpers.generate_random_examples(10, 4, 10)
    analytic = BackPropagationBasedCalculator(
        data_src=PreloadSource(examples),
        neural_net=net,
        cost_function=cost_func)
    numerical = NumericalCalculator(
        data_src=PreloadSource(examples),
        neural_net=net,
        cost_function=cost_func)
    w_grad1, b_grad1 = analytic.compute_gradients()
    w_grad2, b_grad2 = numerical.compute_gradients()
    self.compare_grads(grad1=w_grad1, grad2=w_grad2)
    self.compare_grads(grad1=b_grad1, grad2=b_grad2)
def test_back_propagation_slow_type_array(self):
    """Numerical gradients come back as lists of numpy arrays."""
    net = NetFactory.create_neural_net(sizes=[2, 1, 2])
    cost_func = QuadraticCost(neural_net=net)
    examples = ([np.array([5, 2], float)], [np.array([0.25, 0], float)])
    numerical = NumericalCalculator(data_src=PreloadSource(examples),
                                    neural_net=net,
                                    cost_function=cost_func)
    w_grad, b_grad = numerical.compute_gradients()
    for grad in (w_grad, b_grad):
        self.assertIsInstance(grad, list)
        self.assertIsInstance(grad[0], np.ndarray)
        self.assertIsInstance(grad[1], np.ndarray)
def test_gives_correct_output_for_unseen_data(self):
    """A net trained on parabola samples interpolates an unseen point."""
    net = NetFactory.create_neural_net(sizes=[1, 10, 1])
    trainer = GradientDescent(neural_net=net,
                              cost_function=QuadraticCost(neural_net=net))

    def parabola(x):
        return x ** 2

    examples = helpers.generate_data(f=parabola, start_value=-0.6,
                                     end_value=-0.4, step_value=0.005)
    trainer.train(data_src=PreloadSource(examples), nepochs=10)
    xval = -0.5000125  # deliberately between two training samples
    output = net.feed(np.array([xval], float))
    self.assertAlmostEqual(output[0], parabola(xval), places=1)
def test_feed_into_layer(self):
    """Fresh layers give z == 0 and activation 0.5 (i.e. sigmoid(0))."""
    net = NetFactory.create_neural_net(sizes=[2, 3, 2])

    a, z = net.feed_into_layer(np.array([5, 10], float), layer=0)
    self.assertTupleEqual(a.shape, (3, ))
    self.assertTupleEqual(z.shape, (3, ))
    for j in (0, 1):
        self.assertEqual(z[j], 0)
        self.assertEqual(a[j], 0.5)

    a, z = net.feed_into_layer(np.array([5, 10, 2], float), layer=1)
    self.assertTupleEqual(a.shape, (2, ))
    self.assertTupleEqual(z.shape, (2, ))
    for j in (0, 1):
        self.assertEqual(z[j], 0)
        self.assertEqual(a[j], 0.5)
def test_returns_correct_gradient_shape(self):
    """Gradient lists have one entry per layer with matching shapes."""
    net = NetFactory.create_neural_net(sizes=[3, 2, 2, 5])
    cost_func = cost_functions.QuadraticCost(neural_net=net)
    examples = ([np.array([5, 2, -0.5], float)],
                [np.array([0.25, 0, 0, 0.7, 0.2], float)])
    numerical = NumericalCalculator(data_src=PreloadSource(examples),
                                    neural_net=net,
                                    cost_function=cost_func)
    w_grad, b_grad = numerical.compute_gradients()
    self.assertEqual(len(w_grad), 3)
    self.assertEqual(len(b_grad), 3)
    expected_w_shapes = [(2, 3), (2, 2), (5, 2)]
    expected_b_shapes = [(2, ), (2, ), (5, )]
    for i in range(3):
        self.assertTupleEqual(w_grad[i].shape, expected_w_shapes[i])
        self.assertTupleEqual(b_grad[i].shape, expected_b_shapes[i])
def test_that_returned_type_is_array(self):
    """Backprop gradients come back as lists of numpy arrays."""
    net = NetFactory.create_neural_net(sizes=[2, 1, 2])
    cost_func = cost_functions.QuadraticCost(neural_net=net)
    examples = ([np.array([5, 2], float)], [np.array([0.25, 0], float)])
    calculator = BackPropagationBasedCalculator(
        data_src=PreloadSource(examples),
        neural_net=net,
        cost_function=cost_func)
    w_grad, b_grad = calculator.compute_gradients()
    for grad in (w_grad, b_grad):
        self.assertIsInstance(grad, list)
        self.assertIsInstance(grad[0], np.ndarray)
        self.assertIsInstance(grad[1], np.ndarray)
def test_with_rectifer_activation_and_quadratic_cost(self):
    """Backprop matches numerical gradients for an all-rectifier net."""
    net = NetFactory.create_neural_net(
        sizes=[4, 2, 10],
        hidden_layer_activation=activation_functions.Rectifier,
        output_layer_activation=activation_functions.Rectifier)
    net.randomize_parameters()
    cost_func = cost_functions.QuadraticCost(neural_net=net)
    examples = helpers.generate_random_examples(10, 4, 10)
    analytic = BackPropagationBasedCalculator(
        data_src=PreloadSource(examples),
        neural_net=net,
        cost_function=cost_func)
    numerical = NumericalCalculator(
        data_src=PreloadSource(examples),
        neural_net=net,
        cost_function=cost_func)
    w_grad1, b_grad1 = analytic.compute_gradients()
    w_grad2, b_grad2 = numerical.compute_gradients()
    self.compare_grads(grad1=w_grad1, grad2=w_grad2)
    self.compare_grads(grad1=b_grad1, grad2=b_grad2)
def test_with_regularized_quadratic_loss(self):
    """Gradient checks still pass with an L2 regularization term."""
    net = NetFactory.create_neural_net(sizes=[4, 2, 10])
    net.randomize_parameters()
    cost_func = cost_functions.QuadraticCost(neural_net=net,
                                             l2_reg_term=2.5)
    examples = helpers.generate_random_examples(10, 4, 10)
    analytic = BackPropagationBasedCalculator(
        data_src=PreloadSource(examples),
        neural_net=net,
        cost_function=cost_func)
    numerical = NumericalCalculator(
        data_src=PreloadSource(examples),
        neural_net=net,
        cost_function=cost_func)
    w_grad1, b_grad1 = analytic.compute_gradients()
    w_grad2, b_grad2 = numerical.compute_gradients()
    self.compare_grads(grad1=w_grad1, grad2=w_grad2)
    self.compare_grads(grad1=b_grad1, grad2=b_grad2)
def setUp(self):
    # Minimal 3-1-2 network shared by the tests in this case.
    self.net = NetFactory.create_neural_net(sizes=[3, 1, 2])
def test_feed_after_initialization(self):
    """An untrained net produces 0.5 on every output unit."""
    net = NetFactory.create_neural_net(sizes=[3, 2, 2])
    output = net.feed(np.array([1, 9, 323], float))
    for j in (0, 1):
        self.assertEqual(output[j], 0.5)
def test_init(self):
    """Constructing GradientDescent with a valid net must not raise."""
    net = NetFactory.create_neural_net(sizes=[2, 3, 5])
    cost_func = QuadraticCost(net)
    GradientDescent(neural_net=net, cost_function=cost_func)
def __init__(self):
    # 10 -> 30 -> 784 network; 784 presumably corresponds to a 28x28
    # image produced as *output* (decoder-style) -- confirm against usage.
    self._nnet = NetFactory.create_neural_net(sizes=[10, 30, 784])
def step(context, sizes_csv):
    """Build a net whose layer sizes come from a comma-separated string."""
    layer_sizes = list(map(int, sizes_csv.split(',')))
    context.nnet = NetFactory.create_neural_net(sizes=layer_sizes)
def step(context):
    """Create a randomized 1-10-1 net and stash it on the scenario context."""
    net = NetFactory.create_neural_net(sizes=[1, 10, 1])
    net.randomize_parameters()
    context.nnet = net
def test_creates_file(self):
    """save() must create the destination file on disk."""
    net = NetFactory.create_neural_net(sizes=[2, 1, 2])
    dest_fname = os.path.join('test_temp', 'nets_params.json')
    net.save(dest_fname=dest_fname)
    self.assertTrue(os.path.isfile(dest_fname))