Code Example #1
File: test_fann.py Project: Qwlouse/MontyLearning
def test_FANN_recurrent_gradient_multisample():
    rc = ForwardAndRecurrentConnection(4, 1)
    nn = FANN([rc])
    theta = 2 * np.ones(nn.get_param_dim())
    grad_c = nn.calculate_gradient(theta, X, T)
    grad_e = approx_fprime(theta, nn.calculate_error, 1e-8, X, T)
    assert_allclose(grad_c, grad_e, rtol=1e-3, atol=1e-5)
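All of these tests follow the same gradient-checking pattern: the analytic gradient from FANN.calculate_gradient is compared against a finite-difference estimate of calculate_error produced by scipy.optimize.approx_fprime (with step size 1e-8). The snippets rely on module-level imports and fixtures (X, X_nb, T) that are not shown; the following is a minimal sketch of what such a header could look like. The exact MontyLearning import paths and the fixture values are assumptions for illustration, not taken from the project.

# Presumed test-module header. The fixture data is hypothetical; only the
# numpy/scipy imports are standard.
import numpy as np
from scipy.optimize import approx_fprime
from numpy.testing import assert_allclose, assert_almost_equal

# Hypothetical fixtures: X_nb holds 3 raw input features, X appends a
# constant bias column (hence 4 inputs), and T holds the targets.
X_nb = np.array([[0., 0., 1.],
                 [0., 1., 0.],
                 [1., 0., 0.],
                 [1., 1., 1.]])
X = np.hstack((X_nb, np.ones((4, 1))))
T = np.array([[0.], [1.], [1.], [0.]])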
Code Example #2
File: test_fann.py Project: nagyistoce/MontyLearning
def test_FANN_recurrent_gradient_multisample():
    rc = ForwardAndRecurrentConnection(4, 1)
    nn = FANN([rc])
    theta = 2 * np.ones(nn.get_param_dim())
    grad_c = nn.calculate_gradient(theta, X, T)
    grad_e = approx_fprime(theta, nn.calculate_error, 1e-8, X, T)
    assert_allclose(grad_c, grad_e, rtol=1e-3, atol=1e-5)
Code Example #3
File: test_fann.py Project: nagyistoce/MontyLearning
def test_FANN_with_bias_gradient_multisample():
    fc = FullConnectionWithBias(3, 1)
    sig = SigmoidLayer(1)
    nn = FANN([fc, sig])
    theta = np.random.randn(nn.get_param_dim())
    grad_c = nn.calculate_gradient(theta, X_nb, T)
    grad_e = approx_fprime(theta, nn.calculate_error, 1e-8, X_nb, T)
    assert_almost_equal(grad_c, grad_e)
Code Example #4
File: test_fann.py Project: Qwlouse/MontyLearning
def test_FANN_gradient_multisample():
    fc = FullConnection(4, 1)
    sig = SigmoidLayer(1)
    nn = FANN([fc, sig])
    theta = np.random.randn(nn.get_param_dim())
    grad_c = nn.calculate_gradient(theta, X, T)
    grad_e = approx_fprime(theta, nn.calculate_error, 1e-8, X, T)
    assert_almost_equal(grad_c, grad_e)
Code Example #5
File: test_fann.py Project: nagyistoce/MontyLearning
def test_FANN_recurrent_gradient_single_sample():
    rc = ForwardAndRecurrentConnection(1, 1)
    nn = FANN([rc])
    theta = 2 * np.ones(nn.get_param_dim())
    for x, t in [[0, 1], [1, 1], [0, 0]]:
        x = np.array([[x]])
        grad_c = nn.calculate_gradient(theta, x, t)
        grad_e = approx_fprime(theta, nn.calculate_error, 1e-8, x, t)
        assert_almost_equal(grad_c, grad_e)
Code Example #6
File: test_fann.py Project: nagyistoce/MontyLearning
def test_FANN_gradient_single_sample():
    fc = FullConnection(4, 1)
    sig = SigmoidLayer(1)
    nn = FANN([fc, sig])
    theta = np.random.randn(nn.get_param_dim())
    for x, t in zip(X, T):
        grad_c = nn.calculate_gradient(theta, x, t)
        grad_e = approx_fprime(theta, nn.calculate_error, 1e-8, x, t)
        assert_almost_equal(grad_c, grad_e)
Code Example #7
File: test_fann.py Project: Qwlouse/MontyLearning
def test_FANN_with_bias_gradient_single_sample():
    fc = FullConnectionWithBias(3, 1)
    sig = SigmoidLayer(1)
    nn = FANN([fc, sig])
    theta = np.random.randn(nn.get_param_dim())
    for x, t in zip(X_nb, T):
        grad_c = nn.calculate_gradient(theta, x, t)
        grad_e = approx_fprime(theta, nn.calculate_error, 1e-8, x, t)
        assert_almost_equal(grad_c, grad_e)
Code Example #8
File: test_fann.py Project: Qwlouse/MontyLearning
def test_FANN_recurrent_gradient_single_sample():
    rc = ForwardAndRecurrentConnection(1, 1)
    nn = FANN([rc])
    theta = 2 * np.ones(nn.get_param_dim())
    for x, t in [[0, 1], [1, 1], [0, 0]]:
        x = np.array([[x]])
        grad_c = nn.calculate_gradient(theta, x, t)
        grad_e = approx_fprime(theta, nn.calculate_error, 1e-8, x, t)
        assert_almost_equal(grad_c, grad_e)
Code Example #9
File: iris.py Project: Qwlouse/MontyLearning
def create_neural_network(in_size, hidden_size, out_size, rnd, logger):
    logger.info("Creating a NN with {} inputs, {} hidden units, and {} output units.".format(in_size, hidden_size, out_size))
    c0 = FullConnectionWithBias(in_size, hidden_size)
    s0 = SigmoidLayer(hidden_size)
    c1 = FullConnectionWithBias(hidden_size, out_size)
    s1 = SigmoidLayer(out_size)
    nn = FANN([c0, s0, c1, s1])
    theta = rnd.randn(nn.get_param_dim())
    return nn, theta
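The (nn, theta) pair returned here plugs directly into the calculate_gradient / calculate_error API exercised by the tests above. A hedged usage sketch follows; load_iris_data, the step count, and the learning rate are assumptions, not taken from iris.py.

# Hypothetical training loop for the network built above. load_iris_data
# is an assumed helper; the learning rate and iteration count are guesses.
import logging
import numpy as np

logger = logging.getLogger(__name__)
nn, theta = create_neural_network(4, 5, 3, np.random, logger)
X_train, T_train = load_iris_data()  # hypothetical data loader
for i in range(1000):
    g = nn.calculate_gradient(theta, X_train, T_train)
    theta -= 0.1 * g  # fixed-step gradient descent
print(nn.calculate_error(theta, X_train, T_train))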
Code Example #10
File: test_fann.py Project: Qwlouse/MontyLearning
def test_FANN_multilayer_with_bias_gradient_multisample():
    fc0 = FullConnectionWithBias(3, 2)
    fc1 = FullConnectionWithBias(2, 1)
    sig0 = SigmoidLayer(2)
    sig1 = SigmoidLayer(1)
    nn = FANN([fc0, sig0, fc1, sig1])
    theta = np.random.randn(nn.get_param_dim())
    grad_c = nn.calculate_gradient(theta, X_nb, T)
    grad_e = approx_fprime(theta, nn.calculate_error, 1e-8, X_nb, T)
    assert_almost_equal(grad_c, grad_e)
Code Example #11
File: test_fann.py Project: nagyistoce/MontyLearning
def test_FANN_multilayer_gradient_multisample():
    fc0 = FullConnectionWithBias(4, 2)
    fc1 = FullConnectionWithBias(2, 1)
    sig0 = SigmoidLayer(2)
    sig1 = SigmoidLayer(1)
    nn = FANN([fc0, sig0, fc1, sig1])
    theta = np.random.randn(nn.get_param_dim())
    grad_c = nn.calculate_gradient(theta, X, T)
    grad_e = approx_fprime(theta, nn.calculate_error, 1e-8, X, T)
    assert_almost_equal(grad_c, grad_e)
Code Example #12
File: test_fann.py Project: nagyistoce/MontyLearning
def test_FANN_with_bias_multilayer_gradient_single_sample():
    fc0 = FullConnectionWithBias(3, 2)
    fc1 = FullConnectionWithBias(2, 1)
    sig0 = SigmoidLayer(2)
    sig1 = SigmoidLayer(1)
    nn = FANN([fc0, sig0, fc1, sig1])
    theta = np.random.randn(nn.get_param_dim())
    for x, t in zip(X_nb, T):
        grad_c = nn.calculate_gradient(theta, x, t)
        grad_e = approx_fprime(theta, nn.calculate_error, 1e-8, x, t)
        assert_almost_equal(grad_c, grad_e)
Code Example #13
File: test_fann.py Project: Qwlouse/MontyLearning
def test_FANN_multilayer_gradient_single_sample():
    fc0 = FullConnection(4, 2)
    fc1 = FullConnection(2, 1)
    sig0 = SigmoidLayer(2)
    sig1 = SigmoidLayer(1)
    nn = FANN([fc0, sig0, fc1, sig1])
    theta = np.random.randn(nn.get_param_dim())
    for x, t in zip(X, T):
        grad_c = nn.calculate_gradient(theta, x, t)
        grad_e = approx_fprime(theta, nn.calculate_error, 1e-8, x, t)
        assert_almost_equal(grad_c, grad_e)
Code Example #14
File: iris.py Project: nagyistoce/MontyLearning
def create_neural_network(in_size, hidden_size, out_size, rnd, logger):
    logger.info("Creating a NN with {} inputs, {} hidden units, and {} output units.".format(in_size, hidden_size, out_size))
    c0 = FullConnectionWithBias(in_size, hidden_size)
    s0 = SigmoidLayer(hidden_size)
    c1 = FullConnectionWithBias(hidden_size, out_size)
    s1 = SigmoidLayer(out_size)
    nn = FANN([c0, s0, c1, s1])
    theta = rnd.randn(nn.get_param_dim())
    return nn, theta
Code Example #15
def test_FANN_converges_on_xor_problem():
    fc0 = FullConnectionWithBias(2, 2)
    fc1 = FullConnectionWithBias(2, 1)
    sig0 = SigmoidLayer(2)
    sig1 = SigmoidLayer(1)
    nn = FANN([fc0, sig0, fc1, sig1])
    xor = load_xor()
    theta = np.random.randn(nn.get_param_dim())
    for i in range(2000):
        g = nn.calculate_gradient(theta, xor.data, xor.target)
        theta -= g * 1  # full-batch gradient descent step, learning rate 1
    error = nn.calculate_error(theta, xor.data, xor.target)
    assert_less(error, 0.4)
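The test assumes a load_xor() helper that returns an object exposing .data and .target. A minimal sketch of such a fixture, assuming a Bunch-style container (the actual loader in MontyLearning may differ):

from types import SimpleNamespace
import numpy as np

def load_xor():
    # Hypothetical fixture: the four XOR input/target pairs, packed in a
    # container with .data and .target as the test above expects.
    data = np.array([[0., 0.], [0., 1.], [1., 0.], [1., 1.]])
    target = np.array([[0.], [1.], [1.], [0.]])
    return SimpleNamespace(data=data, target=target)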
Code Example #16
def test_RANN_converges_on_ropot_problem():
    frc = ForwardAndRecurrentSigmoidConnection(5, 5)
    nn = FANN([frc])
    rpot = generate_remember_pattern_over_time()
    theta = np.random.randn(nn.get_param_dim())
    for i in range(100):
        grad = np.zeros_like(theta)
        for X, T in seqEnum(rpot):
            grad += nn.calculate_gradient(theta, X, T)
        theta -= grad * 1  # batch gradient step over all sequences, learning rate 1

    error = sum(nn.calculate_error(theta, X, T) for X, T in seqEnum(rpot))  # builtin sum: np.sum over a generator is unreliable
    assert_less(error, 10.)
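This recurrent test iterates (X, T) sequence pairs via seqEnum over a "remember pattern over time" dataset; neither helper is shown. The following stand-ins are purely hypothetical, inferred from the 5-in/5-out connection and the usage above:

import numpy as np

def generate_remember_pattern_over_time(n_sequences=10, length=8):
    # Hypothetical task: a random 5-bit pattern is shown at t=0 and the
    # network must reproduce it at every later timestep.
    rnd = np.random.RandomState(0)
    dataset = []
    for _ in range(n_sequences):
        pattern = rnd.randint(0, 2, size=(1, 5)).astype(float)
        X = np.vstack([pattern, np.zeros((length - 1, 5))])
        T = np.repeat(pattern, length, axis=0)
        dataset.append((X, T))
    return dataset

def seqEnum(dataset):
    # Hypothetical: yield the (X, T) sequence pairs one at a time.
    return iter(dataset)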