Example #1
def test_FANN_recurrent_gradient_multisample():
    rc = ForwardAndRecurrentConnection(4, 1)
    nn = FANN([rc])
    theta = 2 * np.ones((nn.get_param_dim()))
    grad_c = nn.calculate_gradient(theta, X, T)
    grad_e = approx_fprime(theta, nn.calculate_error, 1e-8, X, T)
    assert_allclose(grad_c, grad_e, rtol=1e-3, atol=1e-5)
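
Note: X, T, E, X_nb, and theta used throughout these tests are module-level fixtures defined elsewhere in the test module and are not shown in the snippets. All of the gradient tests follow the same finite-difference check: the analytic gradient from calculate_gradient is compared against a numeric estimate from approx_fprime (SciPy's scipy.optimize.approx_fprime or an equivalent helper). A minimal, self-contained sketch of that pattern, using a toy quadratic error in place of the FANN methods and hypothetical data:

import numpy as np
from scipy.optimize import approx_fprime

# Toy error function and its analytic gradient, standing in for
# nn.calculate_error / nn.calculate_gradient in the tests.
def calculate_error(theta, X, T):
    return 0.5 * np.sum((X.dot(theta) - T) ** 2)

def calculate_gradient(theta, X, T):
    return X.T.dot(X.dot(theta) - T)

X = np.random.randn(5, 3)      # hypothetical inputs
T = np.random.randn(5)         # hypothetical targets
theta = np.random.randn(3)     # hypothetical parameters

grad_c = calculate_gradient(theta, X, T)                    # analytic gradient
grad_e = approx_fprime(theta, calculate_error, 1e-8, X, T)  # finite differences
np.testing.assert_allclose(grad_c, grad_e, rtol=1e-3, atol=1e-5)
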
Example #2
def test_FANN_error_single_sample():
    fc = FullConnection(4, 1)
    sig = SigmoidLayer(1)
    nn = FANN([fc, sig])
    for x, t, e in zip(X, T, E):
        assert_equal(nn.calculate_error(theta, x, t), 0)
        assert_equal(nn.calculate_error(theta, x, 0), e)
Example #3
def test_FANN_with_bias_feed_forward_single_sample():
    fc = FullConnectionWithBias(3, 1)
    sig = SigmoidLayer(1)
    nn = FANN([fc, sig])
    for x, t in zip(X_nb, T):
        t = np.atleast_2d(t)
        assert_equal(nn.forward_pass(theta, x), t)
Example #4
def test_FANN_with_bias_gradient_multisample():
    fc = FullConnectionWithBias(3, 1)
    sig = SigmoidLayer(1)
    nn = FANN([fc, sig])
    theta = np.random.randn(nn.get_param_dim())
    grad_c = nn.calculate_gradient(theta, X_nb, T)
    grad_e = approx_fprime(theta, nn.calculate_error, 1e-8, X_nb, T)
    assert_almost_equal(grad_c, grad_e)
Example #5
def test_FANN_gradient_multisample():
    fc = FullConnection(4, 1)
    sig = SigmoidLayer(1)
    nn = FANN([fc, sig])
    theta = np.random.randn(nn.get_param_dim())
    grad_c = nn.calculate_gradient(theta, X, T)
    grad_e = approx_fprime(theta, nn.calculate_error, 1e-8, X, T)
    assert_almost_equal(grad_c, grad_e)
Example #6
def create_neural_network(in_size, hidden_size, out_size, rnd, logger):
    logger.info("Creating a NN with {} inputs, {} hidden units, and {} output units.".format(in_size, hidden_size, out_size))
    c0 = FullConnectionWithBias(in_size, hidden_size)
    s0 = SigmoidLayer(hidden_size)
    c1 = FullConnectionWithBias(hidden_size, out_size)
    s1 = SigmoidLayer(out_size)
    nn = FANN([c0, s0, c1, s1])
    theta = rnd.randn(nn.get_param_dim())
    return nn, theta
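
A hypothetical call to create_neural_network; the RandomState seed, logger name, and input shape below are illustrative and not taken from the original project:

import logging
import numpy as np

logging.basicConfig(level=logging.INFO)
rnd = np.random.RandomState(42)            # hypothetical seed
logger = logging.getLogger("nn_factory")   # hypothetical logger name

# Build a network with 4 inputs, 3 hidden units, and 2 outputs, then run a
# forward pass on a single hypothetical input row using the API shown above.
nn, theta = create_neural_network(4, 3, 2, rnd, logger)
y = nn.forward_pass(theta, np.ones((1, 4)))
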
Example #7
def test_FANN_recurrent_gradient_single_sample():
    rc = ForwardAndRecurrentConnection(1, 1)
    nn = FANN([rc])
    theta = 2 * np.ones((nn.get_param_dim()))
    for x, t in [[0, 1], [1, 1], [0, 0]]:
        x = np.array([[x]])
        grad_c = nn.calculate_gradient(theta, x, t)
        grad_e = approx_fprime(theta, nn.calculate_error, 1e-8, x, t)
        assert_almost_equal(grad_c, grad_e)
Example #8
def test_FANN_with_bias_gradient_single_sample():
    fc = FullConnectionWithBias(3, 1)
    sig = SigmoidLayer(1)
    nn = FANN([fc, sig])
    theta = np.random.randn(nn.get_param_dim())
    for x, t in zip(X_nb, T):
        grad_c = nn.calculate_gradient(theta, x, t)
        grad_e = approx_fprime(theta, nn.calculate_error, 1e-8, x, t)
        assert_almost_equal(grad_c, grad_e)
Example #9
def test_FANN_gradient_single_sample():
    fc = FullConnection(4, 1)
    sig = SigmoidLayer(1)
    nn = FANN([fc, sig])
    theta = np.random.randn(nn.get_param_dim())
    for x, t in zip(X, T):
        grad_c = nn.calculate_gradient(theta, x, t)
        grad_e = approx_fprime(theta, nn.calculate_error, 1e-8, x, t)
        assert_almost_equal(grad_c, grad_e)
Example #10
def test_FANN_multilayer_gradient_multisample():
    fc0 = FullConnectionWithBias(4, 2)
    fc1 = FullConnectionWithBias(2, 1)
    sig0 = SigmoidLayer(2)
    sig1 = SigmoidLayer(1)
    nn = FANN([fc0, sig0, fc1, sig1])
    theta = np.random.randn(nn.get_param_dim())
    grad_c = nn.calculate_gradient(theta, X, T)
    grad_e = approx_fprime(theta, nn.calculate_error, 1e-8, X, T)
    assert_almost_equal(grad_c, grad_e)
Example #11
def test_FANN_multilayer_with_bias_gradient_multisample():
    fc0 = FullConnectionWithBias(3, 2)
    fc1 = FullConnectionWithBias(2, 1)
    sig0 = SigmoidLayer(2)
    sig1 = SigmoidLayer(1)
    nn = FANN([fc0, sig0, fc1, sig1])
    theta = np.random.randn(nn.get_param_dim())
    grad_c = nn.calculate_gradient(theta, X_nb, T)
    grad_e = approx_fprime(theta, nn.calculate_error, 1e-8, X_nb, T)
    assert_almost_equal(grad_c, grad_e)
Example #12
def test_FANN_converges_on_vote_problem():
    fc = FullConnectionWithBias(9, 1)
    sig = SigmoidLayer(1)
    nn = FANN([fc, sig])
    vote = generate_majority_vote()
    theta = np.zeros((10,))
    for i in range(500):
        g = nn.calculate_gradient(theta, vote.data, vote.target)
        theta -= g * 1
    error = nn.calculate_error(theta, vote.data, vote.target)
    assert_less(error, 0.2)
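
The convergence tests use plain full-batch gradient descent with a fixed learning rate of 1 (theta -= g * 1). The following self-contained numpy sketch mirrors that loop on a majority-vote task with a single sigmoid unit plus bias; the data generation, gradient, loss, and threshold are illustrative stand-ins for generate_majority_vote and calculate_error, not the project's code:

import numpy as np

def sigmoid(a):
    return 1.0 / (1.0 + np.exp(-a))

rng = np.random.RandomState(0)
data = rng.randint(0, 2, size=(200, 9)).astype(float)
target = (data.sum(axis=1) >= 5).astype(float)       # majority of 9 bits

X = np.hstack([data, np.ones((len(data), 1))])       # append a bias column
theta = np.zeros(10)

for i in range(500):
    y = sigmoid(X.dot(theta))
    g = X.T.dot(y - target) / len(X)                 # averaged cross-entropy gradient
    theta -= g * 1                                   # learning rate 1, as in the test

error = 0.5 * np.mean((sigmoid(X.dot(theta)) - target) ** 2)
assert error < 0.1
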
Example #13
def test_FANN_multilayer_gradient_single_sample():
    fc0 = FullConnection(4, 2)
    fc1 = FullConnection(2, 1)
    sig0 = SigmoidLayer(2)
    sig1 = SigmoidLayer(1)
    nn = FANN([fc0, sig0, fc1, sig1])
    theta = np.random.randn(nn.get_param_dim())
    for x, t in zip(X, T):
        grad_c = nn.calculate_gradient(theta, x, t)
        grad_e = approx_fprime(theta, nn.calculate_error, 1e-8, x, t)
        assert_almost_equal(grad_c, grad_e)
Example #14
def test_FANN_converges_on_and_problem():
    fc = FullConnection(2, 1)
    sig = SigmoidLayer(1)
    nn = FANN([fc, sig])
    and_ = load_and()
    theta = np.array([-0.1, 0.1])
    for i in range(100):
        g = nn.calculate_gradient(theta, and_.data, and_.target)
        theta -= g * 1
    error = nn.calculate_error(theta, and_.data, and_.target)
    assert_less(error, 0.2)
Example #15
def test_FANN_with_bias_multilayer_gradient_single_sample():
    fc0 = FullConnectionWithBias(3, 2)
    fc1 = FullConnectionWithBias(2, 1)
    sig0 = SigmoidLayer(2)
    sig1 = SigmoidLayer(1)
    nn = FANN([fc0, sig0, fc1, sig1])
    theta = np.random.randn(nn.get_param_dim())
    for x, t in zip(X_nb, T):
        grad_c = nn.calculate_gradient(theta, x, t)
        grad_e = approx_fprime(theta, nn.calculate_error, 1e-8, x, t)
        assert_almost_equal(grad_c, grad_e)
Example #16
def test_FANN_converges_on_xor_problem():
    fc0 = FullConnectionWithBias(2, 2)
    fc1 = FullConnectionWithBias(2, 1)
    sig0 = SigmoidLayer(2)
    sig1 = SigmoidLayer(1)
    nn = FANN([fc0, sig0, fc1, sig1])
    xor = load_xor()
    theta = np.random.randn(nn.get_param_dim())
    for i in range(2000):
        g = nn.calculate_gradient(theta, xor.data, xor.target)
        theta -= g * 1
    error = nn.calculate_error(theta, xor.data, xor.target)
    assert_less(error, 0.4)
Example #17
def test_RANN_converges_on_ropot_problem():
    frc = ForwardAndRecurrentSigmoidConnection(5, 5)
    nn = FANN([frc])
    rpot = generate_remember_pattern_over_time()
    theta = np.random.randn(nn.get_param_dim())
    for i in range(100):
        grad = np.zeros_like(theta)
        for X, T in seqEnum(rpot):
            grad += nn.calculate_gradient(theta, X, T)
        theta -= grad * 1

    error = sum(nn.calculate_error(theta, X, T) for X, T in seqEnum(rpot))
    assert_less(error, 10.)
Example #18
def test_FANN_feed_forward_multisample():
    fc = FullConnection(4, 1)
    sig = SigmoidLayer(1)
    nn = FANN([fc, sig])
    assert_equal(nn.forward_pass(theta, X), T)
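
The feed-forward tests assert that forward_pass reproduces the stored targets T exactly for the fixture theta. For FullConnection(4, 1) followed by SigmoidLayer(1), the forward pass presumably amounts to a sigmoid applied to a weighted sum of the inputs; the sketch below is an assumption about the behavior, not the library's actual implementation:

import numpy as np

def sigmoid(a):
    return 1.0 / (1.0 + np.exp(-a))

# Presumed equivalent of FANN([FullConnection(4, 1), SigmoidLayer(1)]).forward_pass:
# theta holds the four connection weights, X has shape (n_samples, 4).
def forward_pass(theta, X):
    X = np.atleast_2d(X)
    return sigmoid(X.dot(theta.reshape(4, 1)))
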
Example #19
def test_FANN_with_bias_error_multisample():
    fc = FullConnectionWithBias(3, 1)
    sig = SigmoidLayer(1)
    nn = FANN([fc, sig])
    assert_equal(nn.calculate_error(theta, X_nb, T), 0.0)
    assert_equal(nn.calculate_error(theta, X_nb, np.zeros_like(T)), np.sum(E))
Example #20
def test_FANN_with_bias_feed_forward_multisample():
    fc = FullConnectionWithBias(3, 1)
    sig = SigmoidLayer(1)
    nn = FANN([fc, sig])
    assert_equal(nn.forward_pass(theta, X_nb), T)
Example #21
def test_FANN_with_bias_dimensions():
    fc = FullConnectionWithBias(5, 1)
    nn = FANN([fc])
    assert_equal(nn.input_size, 5)
    assert_equal(nn.output_size, 1)
Example #22
def build_nn(hidden_units):
    l0 = FullConnectionWithBias(4, hidden_units)
    s0 = SigmoidLayer(hidden_units)
    l1 = FullConnectionWithBias(hidden_units, 3)
    s1 = SigmoidLayer(3)
    return FANN([l0, s0, l1, s1])
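
A hypothetical usage of build_nn, relying only on the FANN API shown in the examples above (get_param_dim and forward_pass); the hidden-unit count and input values are illustrative:

import numpy as np

nn = build_nn(hidden_units=6)
theta = np.random.randn(nn.get_param_dim())
y = nn.forward_pass(theta, np.random.randn(1, 4))   # 4 inputs -> 3 sigmoid outputs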