import itertools as it

import numpy as np
import pytest
from autograd import grad
from numpy.testing import (assert_almost_equal, assert_array_almost_equal,
                           assert_array_equal)

# Module under test together with the pure-Python reference helpers. The
# import path is an assumption; adjust it to the project's actual layout.
from fcnn import FcClassifier, cross_entropy, fcnn_predict, sigmoid


def test_predict(input_units, hidden_units, nr_samples, rgen):
    nn = FcClassifier(input_units, hidden_units)
    nn.init_random()
    parameters = nn.get_weights()

    # Overwrite the randomly initialized parameters with known values so the
    # reference forward pass sees exactly the same weights and biases.
    weights = [rgen.randn(*w.shape) for w, _ in parameters]
    biases = [np.zeros(b.shape) for _, b in parameters]
    for n, (w, b) in enumerate(zip(weights, biases)):
        nn.set_weights(n, w, b)

    x_in = rgen.randn(input_units, nr_samples)
    y_ref = fcnn_predict(x_in, weights, biases, it.repeat(sigmoid))[0, :]
    y_hat = nn.predict(x_in)
    assert_almost_equal(y_hat, y_ref)

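# The tests call ``fcnn_predict`` and ``sigmoid`` as an independent reference
# for the forward pass. A minimal sketch consistent with their use in this
# module (features as rows, samples as columns, one activation per layer):
#
#     def sigmoid(x):
#         return 1 / (1 + anp.exp(-x))          # anp is autograd.numpy
#
#     def fcnn_predict(x, weights, biases, activations):
#         for w, b, phi in zip(weights, biases, activations):
#             x = phi(w @ x + b[:, None])
#         return x
#
# Only the assumed contract is shown here; the project's actual helpers are
# imported above.
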
def test_train(input_units, hidden_units, batch_size, nr_samples, rgen):
    nn = FcClassifier(input_units, hidden_units)
    nn.init_random()

    # ``train`` takes samples as rows, whereas ``evaluate`` (like ``predict``)
    # expects features as rows, hence the transposes below.
    x_in = rgen.randn(nr_samples, input_units)
    y_in = rgen.randint(2, size=nr_samples)

    cost_old = nn.evaluate(x_in.T, y_in)
    nn.train(x_in, y_in, learning_rate=0.0005, nr_epochs=1,
             batch_size=batch_size)
    cost_new = nn.evaluate(x_in.T, y_in)

    # A single epoch at a small learning rate should already lower the cost.
    assert cost_old > cost_new

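# The test above only asserts that the training cost decreases; the assumed
# scheme behind ``train`` is plain minibatch gradient descent, i.e. for each
# minibatch the parameters move against the averaged cost gradient:
#
#     w -= learning_rate * dC/dw
#     b -= learning_rate * dC/db
#
# The concrete optimizer details live in ``FcClassifier.train`` and are not
# pinned down by this test.
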
def test_backprop(input_units, hidden_units, rgen):
    nn = FcClassifier(input_units, hidden_units)
    nn.init_random()
    parameters = nn.get_weights()
    weights = [w for w, _ in parameters]
    biases = [b for _, b in parameters]

    x_in = rgen.randn(input_units, 1)
    cost, grads = nn.back_propagate(x_in, 1.0)
    grad_w = [w for w, _ in grads]
    grad_b = [b for _, b in grads]

    # Compare the weight gradients with autograd's reference gradients of the
    # cross-entropy cost for a single sample with label 1.
    def costf_w(weights):
        y_hat = fcnn_predict(x_in, weights, biases, it.repeat(sigmoid))
        return cross_entropy(1.0, y_hat)

    grad_costf_ref = grad(costf_w)(weights)
    for w, w_ref in zip(grad_w, grad_costf_ref):
        assert_array_almost_equal(w, w_ref)

    # Same check for the bias gradients.
    def costf_b(biases):
        y_hat = fcnn_predict(x_in, weights, biases, it.repeat(sigmoid))
        return cross_entropy(1.0, y_hat)

    grad_costf_ref = grad(costf_b)(biases)
    for b, b_ref in zip(grad_b, grad_costf_ref):
        assert_array_almost_equal(b, b_ref)

    # The cost returned by back_propagate must match the reference cost.
    assert_almost_equal(cost, costf_b(biases))

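# Since ``test_backprop`` pushes autograd's ``grad`` through the reference
# helpers, ``fcnn_predict`` and ``cross_entropy`` have to be written against
# ``autograd.numpy``. A sketch of the binary cross entropy consistent with
# its use here and in ``test_evaluate`` (a scalar mean over samples):
#
#     def cross_entropy(y, y_hat):
#         return -anp.mean(y * anp.log(y_hat) + (1 - y) * anp.log(1 - y_hat))
#
# Again this only records the assumed contract; the real helper may add
# details such as clipping for numerical stability.
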
def test_view_set_weights_permissions():
    shape = (5, 1)
    nn = FcClassifier(shape[0], tuple())
    new_weight = np.random.randn(shape[1], shape[0])
    new_bias = np.random.randn(shape[1],)
    new_weight_copy = new_weight.copy()
    new_bias_copy = new_bias.copy()

    # ``set_weights`` should copy: mutating the arrays we passed in must not
    # change the network's parameters.
    nn.set_weights(0, new_weight, new_bias)
    new_weight[:] = 0
    new_bias[:] = 0
    nn_weight, nn_bias = nn.get_weights()[0]
    assert_array_equal(new_weight_copy, nn_weight)
    assert_array_equal(new_bias_copy, nn_bias)

    # ``get_weights`` should hand out read-only views, not owning copies.
    assert not nn_weight.flags['OWNDATA']
    assert not nn_bias.flags['OWNDATA']
    assert not nn_weight.flags['WRITEABLE']
    assert not nn_bias.flags['WRITEABLE']

    # The views must keep the parameter buffers alive even after the network
    # itself has been deleted.
    del nn
    assert_array_equal(new_weight_copy, nn_weight)
    assert_array_equal(new_bias_copy, nn_bias)

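# Background for the flags checked above: a NumPy view reports
# ``OWNDATA == False`` and keeps the underlying buffer alive after its owner
# is deleted, and ``view.flags['WRITEABLE'] = False`` makes it read-only:
#
#     base = np.zeros(3)
#     view = base.view()
#     view.flags['WRITEABLE'] = False
#     assert not view.flags['OWNDATA']
#     assert not view.flags['WRITEABLE']
#
# This is presumably the mechanism ``get_weights`` uses to expose the
# network parameters safely.
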
def test_random_initialization(input_units, hidden_units):
    nn = FcClassifier(input_units, hidden_units)

    # Before initialization all parameters should be exactly zero.
    weights = nn.get_weights()
    for w, b in weights:
        assert_almost_equal(np.linalg.norm(w), 0)
        assert_almost_equal(np.linalg.norm(b), 0)

    # After random initialization the weights should be clearly nonzero; the
    # biases may legitimately remain at zero, so only the weights are checked.
    nn.init_random()
    weights = nn.get_weights()
    for w, b in weights:
        assert np.linalg.norm(w) > 0.5

def test_view_set_weights():
    shape = (5, 8, 1)
    nn = FcClassifier(shape[0], shape[1:-1])
    weights = nn.get_weights()

    # There are len(shape) - 1 weight layers in total: the hidden layer plus
    # the implicit single-unit output layer.
    for i in range(len(shape) - 1):
        w, b = weights[i]
        assert w.shape == (shape[i + 1], shape[i])
        assert b.shape == (shape[i + 1],)

    new_weight = np.random.randn(shape[1], shape[0])
    new_bias = np.random.randn(shape[1],)
    nn.set_weights(0, new_weight, new_bias)
    assert_array_equal(new_weight, nn.get_weights()[0][0])
    assert_array_equal(new_bias, nn.get_weights()[0][1])

def test_evaluate(input_units, hidden_units, nr_samples, rgen):
    nn = FcClassifier(input_units, hidden_units)
    nn.init_random()
    parameters = nn.get_weights()
    weights = [w for w, _ in parameters]
    biases = [b for _, b in parameters]

    x_in = rgen.randn(input_units, nr_samples)
    y_in = rgen.randint(2, size=nr_samples)
    cost = nn.evaluate(x_in, y_in)

    y_ref = fcnn_predict(x_in, weights, biases, it.repeat(sigmoid))
    cost_ref = cross_entropy(y_in, y_ref)
    assert_almost_equal(cost, cost_ref)

def test_set_weights_exception(rgen):
    shape = (5, 1)
    nn = FcClassifier(shape[0], tuple())

    # Setting a weight matrix with the wrong shape should raise.
    with pytest.raises(ValueError):
        nn.set_weights(0, rgen.randn(shape[1] + 3, shape[0]),
                       rgen.randn(shape[1]))

    # Setting a bias vector with the wrong shape should raise.
    with pytest.raises(ValueError):
        nn.set_weights(0, rgen.randn(shape[1], shape[0]),
                       rgen.randn(shape[1] + 1))

def test_init(input_units, hidden_units):
    nn = FcClassifier(input_units, hidden_units)
    assert nn.hidden_layers == len(hidden_units)
    assert nn.input_units == input_units
    assert tuple(nn.hidden_units) == hidden_units

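# The parametrization fixtures used throughout this module are not defined
# here; they presumably live in the project's conftest.py. The block below is
# a minimal, runnable sketch of what those fixtures could look like: the
# names match the test signatures, but the concrete parameter values are
# assumptions, not the project's actual test grid.

@pytest.fixture(params=[(), (20,), (20, 10)])
def hidden_units(request):
    return request.param


@pytest.fixture(params=[10])
def input_units(request):
    return request.param


@pytest.fixture(params=[100])
def nr_samples(request):
    return request.param


@pytest.fixture(params=[10])
def batch_size(request):
    return request.param


@pytest.fixture
def rgen():
    # A fixed seed keeps the randomized tests reproducible.
    return np.random.RandomState(seed=1234)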