Example No. 1
    def test_derivate_large(self):
        classes = ['a', 'b', 'c']
        y = 'b'
        x = random.randn(8, 3, 10) * 5 + 3
        state_machine = DefaultStateMachine(classes)
        parameters = Hacrf._initialize_parameters(state_machine, x.shape[2])
        parameters = random.randn(*parameters.shape) * 10 - 2

        test_model = _Model(state_machine, x, y)
        print(test_model._lattice)

        expected_dll = np.zeros(parameters.shape)

        # Finite difference gradient approximation
        delta = 10.0**-7
        S, D = expected_dll.shape
        for s in range(S):
            for d in range(D):
                dg = np.zeros(parameters.shape)
                dg[s, d] = delta
                y0, _ = test_model.forward_backward(parameters)
                y1, _ = test_model.forward_backward(parameters + dg)
                print(s, d, y0, y1)
                expected_dll[s, d] = (y1 - y0) / delta

        actual_ll, actual_dll = test_model.forward_backward(parameters)

        print(expected_dll)
        print(actual_dll)
        self.assertEqual((np.isnan(actual_dll)).any(), False)
        assert_array_almost_equal(actual_dll, expected_dll, decimal=TEST_PRECISION)
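
The test above approximates each partial derivative with a one-sided (forward) difference, (f(p + dg) - f(p)) / delta. A central difference perturbs the parameter in both directions and typically gives a more accurate reference gradient. The sketch below is a generic, self-contained helper, not part of the pyhacrf API; the function name and the quadratic test function are only for illustration.

import numpy as np

def finite_difference_gradient(f, parameters, delta=1e-7):
    # Central-difference approximation of the gradient of a scalar
    # function f (e.g. a log-likelihood) with respect to `parameters`.
    # Error is O(delta**2), versus O(delta) for the forward difference
    # used in the tests above.
    grad = np.zeros_like(parameters)
    it = np.nditer(parameters, flags=['multi_index'])
    for _ in it:
        dg = np.zeros_like(parameters)
        dg[it.multi_index] = delta
        grad[it.multi_index] = (f(parameters + dg) - f(parameters - dg)) / (2 * delta)
    return grad

# Usage on a quadratic whose exact gradient is 2 * p:
p = np.arange(6, dtype='float64').reshape(3, 2)
approx = finite_difference_gradient(lambda q: (q ** 2).sum(), p)
print(np.allclose(approx, 2 * p, atol=1e-5))  # True
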
Example No. 2
def test_derivate_large():
    classes = ['a', 'b', 'c']
    y = 'b'
    x = random.randn(20, 3, 10) * 5 + 3
    state_machine, states_to_classes = Hacrf._default_state_machine(classes)
    parameters = Hacrf._initialize_parameters(state_machine, x.shape[2])
    parameters = random.randn(*parameters.shape) * 10 - 2

    test_model = _Model(state_machine, states_to_classes, x, y)
    expected_dll = np.zeros(parameters.shape)

    # Finite difference gradient approximation
    delta = 10.0**-7
    S, D = expected_dll.shape
    for s in range(S):
        for d in range(D):
            dg = np.zeros(parameters.shape)
            dg[s, d] = delta
            y0, _ = test_model.forward_backward(parameters)
            y1, _ = test_model.forward_backward(parameters + dg)
            expected_dll[s, d] = (y1 - y0) / delta

    actual_ll, actual_dll = test_model.forward_backward(parameters)

    print((abs(actual_dll) - abs(expected_dll)).sum())
    assert_array_almost_equal(actual_dll, expected_dll, decimal=4)
Example No. 3
    def test_derivate_medium(self):
        classes = ['a', 'b']
        parameters = np.array(range(-8, 8), dtype='float64').reshape((8, 2))
        x = np.array([[[0, 1],
                       [2, 1]],
                      [[0, 1],
                       [1, 0.0]]])
        y = 'a'
        state_machine = DefaultStateMachine(classes)
        test_model = _Model(state_machine, x, y)
        print(test_model._lattice)

        expected_dll = np.zeros(parameters.shape)

        # Finite difference gradient approximation
        delta = 10.0**-7
        S, D = expected_dll.shape
        for s in range(S):
            for d in range(D):
                dg = np.zeros(parameters.shape)
                dg[s, d] = delta
                y0, _ = test_model.forward_backward(parameters)
                y1, _ = test_model.forward_backward(parameters + dg)
                print(s, d, y0, y1)
                expected_dll[s, d] = (y1 - y0) / delta

        actual_ll, actual_dll = test_model.forward_backward(parameters)

        print(expected_dll)
        print(actual_dll)
        assert_array_almost_equal(actual_dll, expected_dll, decimal=TEST_PRECISION)
Example No. 4
def test_derivate_large():
    classes = ['a', 'b', 'c']
    y = 'b'
    x = random.randn(20, 3, 10) * 5 + 3
    state_machine, states_to_classes = Hacrf._default_state_machine(classes)
    parameters = Hacrf._initialize_parameters(state_machine, x.shape[2])
    parameters = random.randn(*parameters.shape) * 10 - 2

    test_model = _Model(state_machine, states_to_classes, x, y)
    expected_dll = np.zeros(parameters.shape)

    # Finite difference gradient approximation
    delta = 10.0**-7
    S, D = expected_dll.shape
    for s in range(S):
        for d in range(D):
            dg = np.zeros(parameters.shape)
            dg[s, d] = delta
            y0, _ = test_model.forward_backward(parameters)
            y1, _ = test_model.forward_backward(parameters + dg)
            expected_dll[s, d] = (y1 - y0) / delta

    actual_ll, actual_dll = test_model.forward_backward(parameters)

    print((abs(actual_dll) - abs(expected_dll)).sum())
    assert_array_almost_equal(actual_dll, expected_dll, decimal=4)
Example No. 5
    def test_derivate_medium(self):
        classes = ['a', 'b']
        parameters = np.array(range(-8, 8), dtype='float64').reshape((8, 2))
        x = np.array([[[0, 1], [2, 1]], [[0, 1], [1, 0.0]]])
        y = 'a'
        state_machine = DefaultStateMachine(classes)
        test_model = _Model(state_machine, x, y)
        print(test_model._lattice)

        expected_dll = np.zeros(parameters.shape)

        # Finite difference gradient approximation
        delta = 10.0**-7
        S, D = expected_dll.shape
        for s in range(S):
            for d in range(D):
                dg = np.zeros(parameters.shape)
                dg[s, d] = delta
                y0, _ = test_model.forward_backward(parameters)
                y1, _ = test_model.forward_backward(parameters + dg)
                print(s, d, y0, y1)
                expected_dll[s, d] = (y1 - y0) / delta

        actual_ll, actual_dll = test_model.forward_backward(parameters)

        print(expected_dll)
        print(actual_dll)
        assert_array_almost_equal(actual_dll,
                                  expected_dll,
                                  decimal=TEST_PRECISION)
Example No. 6
    def test_derivate_large(self):
        classes = ['a', 'b', 'c']
        y = 'b'
        x = random.randn(8, 3, 10) * 5 + 3
        state_machine = DefaultStateMachine(classes)
        parameters = Hacrf._initialize_parameters(state_machine, x.shape[2])
        parameters = random.randn(*parameters.shape) * 10 - 2

        test_model = _Model(state_machine, x, y)
        print(test_model._lattice)

        expected_dll = np.zeros(parameters.shape)

        # Finite difference gradient approximation
        delta = 10.0**-7
        S, D = expected_dll.shape
        for s in range(S):
            for d in range(D):
                dg = np.zeros(parameters.shape)
                dg[s, d] = delta
                y0, _ = test_model.forward_backward(parameters)
                y1, _ = test_model.forward_backward(parameters + dg)
                print(s, d, y0, y1)
                expected_dll[s, d] = (y1 - y0) / delta

        actual_ll, actual_dll = test_model.forward_backward(parameters)

        print(expected_dll)
        print(actual_dll)
        self.assertEqual((np.isnan(actual_dll)).any(), False)
        assert_array_almost_equal(actual_dll,
                                  expected_dll,
                                  decimal=TEST_PRECISION)
Example No. 7
    def test_forward_single(self):
        start_states = [0, 1]
        transitions = [(0, 0, (1, 1)),
                       (0, 1, (0, 1)),
                       (0, 0, (1, 0)),
                       (0, 2, lambda i, j, k: (0, 2))]
        states_to_classes = {0: 'a', 1: 'a', 2: 'b'}  # Dummy

        state_machine = GeneralStateMachine(start_states, transitions, states_to_classes)

        parameters = np.array(range(-7, 7), dtype='float64').reshape((7, 2))
        # parameters =
        # 0([[-7, -6],
        # 1  [-5, -4],
        # 2  [-3, -2],
        # 3  [-1,  0],
        # 4  [ 1,  2],
        # 5  [ 3,  4],
        # 6  [ 5,  6]])
        x = np.array([[[0, 1],
                       [1, 0],
                       [2, 1]],
                      [[0, 1],
                       [1, 0],
                       [1, 0]]])
        y = 'a'
        # Expected lattice:
        #               #     ________
        # 1.  .  .      # 1  0  __0 - 21
        #               #    | /
        # 0.  .  .      # 0  0
        #  0  1  2      #    0    1    2
        expected_alpha = {
            (0, 0, 0): np.exp(-6),
            (0, 0, 0, 1, 0, 0, 5): np.exp(-6) * np.exp(4),
            (0, 0, 0, 1, 1, 0, 3): np.exp(-6) * np.exp(-1),
            (1, 0, 0): np.exp(-6) * np.exp(4) * np.exp(-6),
            (1, 0, 0, 1, 2, 2, 6): np.exp(-6) * np.exp(4) * np.exp(-6) * np.exp(5),
            (1, 1, 0): np.exp(-6) * np.exp(-1) * np.exp(-7),
            (1, 1, 0, 1, 2, 1, 4): np.exp(-6) * np.exp(-1) * np.exp(-7) * np.exp(1),
            (1, 2, 1): np.exp(-6) * np.exp(-1) * np.exp(-7) * np.exp(1) * np.exp(-5),
            (1, 2, 2): np.exp(-6) * np.exp(4) * np.exp(-6) * np.exp(5) * np.exp(-3)
        }
        expected_alpha = {k: np.emath.log(v) for k, v in expected_alpha.items()}
        test_model = _Model(state_machine, x, y)
        x_dot_parameters = np.dot(x, parameters.T)  # Pre-compute the dot product
        actual_alpha = test_model._forward(x_dot_parameters)

        actual_alpha = {k: v for k, v in actual_alpha.items()
                        if not np.isneginf(v)}
        print(actual_alpha)

        self.assertEqual(len(actual_alpha), len(expected_alpha))
        print()
        for key in sorted(expected_alpha.keys()):
            print(key, (expected_alpha[key]), (actual_alpha[key]))
            self.assertEqual(actual_alpha[key], expected_alpha[key])
Example No. 8
    def test_forward_connected(self):
        classes = ['a', 'b']
        parameters = np.array(range(-8, 8), dtype='float64').reshape((8, 2))
        # parameters =
        #0([[-8, -7],
        #1  [-6, -5],
        #2  [-4, -3],
        #3  [-2, -1],
        #4  [ 0,  1],
        #5  [ 2,  3],
        #6  [ 4,  5],
        #7  [ 6,  7]])
        x = np.array([[[0, 1], [2, 1]], [[0, 1], [1, 0]]])
        y = 'a'
        expected_alpha = {
            (0, 0, 0): np.exp(-7),
            (0, 0, 0, 0, 1, 0, 4): np.exp(-7) * np.exp(1),
            (0, 0, 0, 1, 0, 0, 6): np.exp(-7) * np.exp(5),
            (0, 0, 0, 1, 1, 0, 2): np.exp(-7) * np.exp(-4),
            (0, 0, 1): np.exp(-5),
            (0, 0, 1, 0, 1, 1, 5): np.exp(-5) * np.exp(7),
            (0, 0, 1, 1, 0, 1, 7): np.exp(-5) * np.exp(7),
            (0, 0, 1, 1, 1, 1, 3): np.exp(-5) * np.exp(-2),
            (0, 1, 0): np.exp(-7) * np.exp(1) * np.exp(-23),
            (0, 1, 0, 1, 1, 0, 6):
            np.exp(-7) * np.exp(1) * np.exp(-23) * np.exp(4),
            (0, 1, 1): np.exp(-5) * np.exp(7) * np.exp(-17),
            (0, 1, 1, 1, 1, 1, 7):
            np.exp(-5) * np.exp(7) * np.exp(-17) * np.exp(6),
            (1, 0, 0): np.exp(-7) * np.exp(5) * np.exp(-7),
            (1, 0, 0, 1, 1, 0, 4):
            np.exp(-7) * np.exp(5) * np.exp(-7) * np.exp(0),
            (1, 0, 1): np.exp(-5) * np.exp(7) * np.exp(-5),
            (1, 0, 1, 1, 1, 1, 5):
            np.exp(-5) * np.exp(7) * np.exp(-5) * np.exp(2),
            (1, 1, 0): (np.exp(-11) + np.exp(-25) + np.exp(-9)) * np.exp(-8),
            (1, 1, 1): (np.exp(-1) + np.exp(-9) + np.exp(-7)) * np.exp(-6)
        }
        expected_alpha = {
            k: np.emath.log(v)
            for k, v in expected_alpha.items()
        }

        state_machine = DefaultStateMachine(classes)
        print()
        test_model = _Model(state_machine, x, y)
        for s in test_model._lattice:
            print(s)
        x_dot_parameters = np.dot(x,
                                  parameters.T)  # Pre-compute the dot product
        actual_alpha = test_model._forward(x_dot_parameters)

        self.assertEqual(len(actual_alpha), len(expected_alpha))
        for key in sorted(expected_alpha.keys()):
            print(key, expected_alpha[key], actual_alpha[key])
            self.assertAlmostEqual(actual_alpha[key], expected_alpha[key])
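
In the expected alphas above, nodes with several incoming transitions, e.g. (1, 1, 0) and (1, 1, 1), sum the probabilities of all incoming paths before the final log is taken. Done naively in log space this underflows easily; the usual remedy is a log-sum-exp. The helper below is a hypothetical illustration of that identity, checked against the (1, 1, 0) value from the test; it is not the library's internal routine.

import numpy as np

def logsumexp(log_values):
    # Stable log(sum(exp(v))): shift by the maximum so the largest
    # exponentiated term is exp(0) = 1.
    log_values = np.asarray(log_values, dtype='float64')
    m = log_values.max()
    return m + np.log(np.exp(log_values - m).sum())

# Incoming log-space path weights for node (1, 1, 0), followed by the
# node's own log potential of -8 (see expected_alpha above):
incoming = [-11.0, -25.0, -9.0]
alpha_110 = logsumexp(incoming) + (-8.0)
reference = np.log((np.exp(-11) + np.exp(-25) + np.exp(-9)) * np.exp(-8))
print(np.isclose(alpha_110, reference))  # True
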
Example No. 9
    def test_derivate_chain(self):
        classes = ['a', 'b']
        parameters = np.array(range(-8, 8), dtype='float64').reshape((8, 2))
        # parameters =
        #0([[-8, -7],
        #1  [-6, -5],
        #2  [-4, -3],
        #3  [-2, -1],
        #4  [ 0,  1],
        #5  [ 2,  3],
        #6  [ 4,  5],
        #7  [ 6,  7]])
        x = np.array([[[0, 1], [1, 2]]], dtype='float64')
        y = 'a'
        state_machine = DefaultStateMachine(classes)
        test_model = _Model(state_machine, x, y)
        print(test_model._lattice)
        #
        # 0   01 --- 01
        #     0      1
        # states_to_classes = {0: 'a', 1: 'b'}
        # (0, 0, 0) :               exp(-7)
        # (0, 0, 0, 0, 1, 0, 4) :   exp(-7) * exp(2)
        # (0, 0, 1) :               exp(-5)
        # (0, 0, 1, 0, 1, 1, 5) :   exp(-5) * exp(8)
        # (0, 1, 0) :               exp(-7) * exp(2) * exp(-8 - 14)     = exp(-27)
        # (0, 1, 1) :               exp(-5) * exp(8) * exp(-6 - 10)     = exp(-13)
        # p(y|G,X) = f0(g00,g01,x00,x01,y) f1(g40,g41,x10,x11,y) f2(g00,g01,x00,x01,y)  +
        #            f0(g10,g11,x00,x01,y) f1(g50,g51,x10,x11,y) f2(g10,g11,x00,x01,y)
        # = exp(-27) / (exp(-27) + exp(-13))
        expected_ll = np.emath.log(np.exp(-27) / (np.exp(-27) + np.exp(-13)))
        expected_dll = np.zeros(parameters.shape)

        # Finite difference gradient approximation
        delta = 10.0**-7
        S, D = expected_dll.shape
        for s in range(S):
            for d in range(D):
                dg = np.zeros(parameters.shape)
                dg[s, d] = delta
                y0, _ = test_model.forward_backward(parameters)
                y1, _ = test_model.forward_backward(parameters + dg)
                print(s, d, y0, y1)
                expected_dll[s, d] = (y1 - y0) / delta

        actual_ll, actual_dll = test_model.forward_backward(parameters)

        print(expected_ll, actual_ll)
        print(expected_dll)
        print(actual_dll)
        self.assertAlmostEqual(actual_ll, expected_ll)
        assert_array_almost_equal(actual_dll,
                                  expected_dll,
                                  decimal=TEST_PRECISION)
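
The comment block in the test derives expected_ll = log(exp(-27) / (exp(-27) + exp(-13))). Computing that expression literally works here because the exponents are small, but the same value can be obtained without leaving log space via np.logaddexp, which is the safer pattern when the log potentials are large. A quick check of the arithmetic:

import numpy as np

# log(exp(-27) / (exp(-27) + exp(-13))) = -27 - log(exp(-27) + exp(-13))
direct = np.emath.log(np.exp(-27) / (np.exp(-27) + np.exp(-13)))
stable = -27.0 - np.logaddexp(-27.0, -13.0)
print(direct, stable)              # both approximately -14.0000008
print(np.isclose(direct, stable))  # True
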
Example No. 10
    def test_derivate_chain(self):
        classes = ['a', 'b']
        parameters = np.array(range(-8, 8), dtype='float64').reshape((8, 2))
        # parameters =
        #0([[-8, -7],
        #1  [-6, -5],
        #2  [-4, -3],
        #3  [-2, -1],
        #4  [ 0,  1],
        #5  [ 2,  3],
        #6  [ 4,  5],
        #7  [ 6,  7]])
        x = np.array([[[0, 1],
                       [1, 2]]], dtype='float64')
        y = 'a'
        state_machine = DefaultStateMachine(classes)
        test_model = _Model(state_machine, x, y)
        print(test_model._lattice)
        #
        # 0   01 --- 01
        #     0      1
        # states_to_classes = {0: 'a', 1: 'b'}
        # (0, 0, 0) :               exp(-7)
        # (0, 0, 0, 0, 1, 0, 4) :   exp(-7) * exp(2)
        # (0, 0, 1) :               exp(-5)
        # (0, 0, 1, 0, 1, 1, 5) :   exp(-5) * exp(8)
        # (0, 1, 0) :               exp(-7) * exp(2) * exp(-8 - 14)     = exp(-27)
        # (0, 1, 1) :               exp(-5) * exp(8) * exp(-6 - 10)     = exp(-13)
        # p(y|G,X) = f0(g00,g01,x00,x01,y) f1(g40,g41,x10,x11,y) f2(g00,g01,x00,x01,y)  +
        #            f0(g10,g11,x00,x01,y) f1(g50,g51,x10,x11,y) f2(g10,g11,x00,x01,y)
        # = exp(-27) / (exp(-27) + exp(-13))
        expected_ll = np.emath.log(np.exp(-27) / (np.exp(-27) + np.exp(-13)))
        expected_dll = np.zeros(parameters.shape)

        # Finite difference gradient approximation
        delta = 10.0**-7
        S, D = expected_dll.shape
        for s in range(S):
            for d in range(D):
                dg = np.zeros(parameters.shape)
                dg[s, d] = delta
                y0, _ = test_model.forward_backward(parameters)
                y1, _ = test_model.forward_backward(parameters + dg)
                print(s, d, y0, y1)
                expected_dll[s, d] = (y1 - y0) / delta

        actual_ll, actual_dll = test_model.forward_backward(parameters)

        print(expected_ll, actual_ll)
        print(expected_dll)
        print(actual_dll)
        self.assertAlmostEqual(actual_ll, expected_ll)
        assert_array_almost_equal(actual_dll, expected_dll, decimal=TEST_PRECISION)
Example No. 11
    def test_forward_connected(self):
        classes = ['a', 'b']
        parameters = np.array(range(-8, 8), dtype='float64').reshape((8, 2))
        # parameters =
        #0([[-8, -7],
        #1  [-6, -5],
        #2  [-4, -3],
        #3  [-2, -1],
        #4  [ 0,  1],
        #5  [ 2,  3],
        #6  [ 4,  5],
        #7  [ 6,  7]])
        x = np.array([[[0, 1],
                       [2, 1]],
                      [[0, 1],
                       [1, 0]]])
        y = 'a'
        expected_alpha = {
            (0, 0, 0): np.exp(-7),
            (0, 0, 0, 0, 1, 0, 4): np.exp(-7) * np.exp(1),
            (0, 0, 0, 1, 0, 0, 6): np.exp(-7) * np.exp(5),
            (0, 0, 0, 1, 1, 0, 2): np.exp(-7) * np.exp(-4),
            (0, 0, 1): np.exp(-5),
            (0, 0, 1, 0, 1, 1, 5): np.exp(-5) * np.exp(7),
            (0, 0, 1, 1, 0, 1, 7): np.exp(-5) * np.exp(7),
            (0, 0, 1, 1, 1, 1, 3): np.exp(-5) * np.exp(-2),
            (0, 1, 0): np.exp(-7) * np.exp(1) * np.exp(-23),
            (0, 1, 0, 1, 1, 0, 6): np.exp(-7) * np.exp(1) * np.exp(-23) * np.exp(4),
            (0, 1, 1): np.exp(-5) * np.exp(7) * np.exp(-17),
            (0, 1, 1, 1, 1, 1, 7): np.exp(-5) * np.exp(7) * np.exp(-17) * np.exp(6),
            (1, 0, 0): np.exp(-7) * np.exp(5) * np.exp(-7),
            (1, 0, 0, 1, 1, 0, 4): np.exp(-7) * np.exp(5) * np.exp(-7) * np.exp(0),
            (1, 0, 1): np.exp(-5) * np.exp(7) * np.exp(-5),
            (1, 0, 1, 1, 1, 1, 5): np.exp(-5) * np.exp(7) * np.exp(-5) * np.exp(2),
            (1, 1, 0): (np.exp(-11) + np.exp(-25) + np.exp(-9)) * np.exp(-8),
            (1, 1, 1): (np.exp(-1) + np.exp(-9) + np.exp(-7)) * np.exp(-6)
        }
        expected_alpha = {k: np.emath.log(v) for k, v in expected_alpha.items()}

        state_machine = DefaultStateMachine(classes)
        print()
        test_model = _Model(state_machine, x, y)
        for s in test_model._lattice:
            print(s)
        x_dot_parameters = np.dot(x, parameters.T)  # Pre-compute the dot product
        actual_alpha = test_model._forward(x_dot_parameters)

        self.assertEqual(len(actual_alpha), len(expected_alpha))
        for key in sorted(expected_alpha.keys()):
            print(key, expected_alpha[key], actual_alpha[key])
            self.assertAlmostEqual(actual_alpha[key], expected_alpha[key])
Example No. 12
    def test_forward_backward_same_partition_value(self):
        classes = ['a', 'b']
        parameters = np.array(range(-8, 8), dtype='float64').reshape((8, 2))
        x = np.array([[[0, 1],
                       [2, 1]],
                      [[0, 1],
                       [1, 0]]])
        y = 'a'
        state_machine = DefaultStateMachine(classes)
        test_model = _Model(state_machine, x, y)
        x_dot_parameters = np.dot(x, parameters.T)  # Pre-compute the dot product
        actual_alpha = test_model._forward(x_dot_parameters)
        actual_beta = test_model._backward(x_dot_parameters)

        print(actual_alpha[(1, 1, 0)], actual_beta[(0, 0, 0)])
        print(actual_alpha[(1, 1, 1)], actual_beta[(0, 0, 1)])
        self.assertAlmostEqual(actual_alpha[(1, 1, 0)], actual_beta[(0, 0, 0)] + (np.dot(x[0, 0, :], parameters[0, :])))
        self.assertAlmostEqual(actual_alpha[(1, 1, 1)], actual_beta[(0, 0, 1)] + (np.dot(x[0, 0, :], parameters[1, :])))
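
The assertions above express that the forward and backward recursions agree on the partition value: alpha at the final lattice node equals beta at the corresponding start node plus that start node's own potential, which alpha includes but beta does not. The toy chain below (unrelated to the pyhacrf lattice and state machine, with made-up potentials) illustrates the same identity in isolation.

import numpy as np

np.random.seed(0)
node = np.random.randn(2, 2)   # node[t, s]: log potential of state s at step t
trans = np.random.randn(2, 2)  # trans[s0, s1]: log potential of s0 -> s1

# Forward: alpha[t, s] accumulates, in log space, all paths ending in s.
alpha0 = node[0]
alpha1 = np.logaddexp.reduce(alpha0[:, None] + trans, axis=0) + node[1]
z_forward = np.logaddexp.reduce(alpha1)

# Backward: beta[t, s] accumulates all paths leaving s; the start node's
# own potential is added at the end, mirroring the assertions above.
beta1 = np.zeros(2)
beta0 = np.logaddexp.reduce(trans + node[1][None, :] + beta1[None, :], axis=1)
z_backward = np.logaddexp.reduce(node[0] + beta0)

print(np.isclose(z_forward, z_backward))  # True
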
Example No. 13
    def test_backward_connected(self):
        parameters = np.array(range(-3, 3), dtype='float64').reshape((3, 2))
        # parameters =
        #0 ([[-3, -2],
        #1   [-1,  0],
        #2   [ 1,  2]])
        x = np.array([[[0, 1],
                       [2, 1]],
                      [[0, 1],
                       [1, 0]]])
        y = 'a'
        expected_beta = {
            (0, 0, 0): (np.exp(-4) + np.exp(-12)),  # * np.exp(-2),
            (0, 0, 0, 0, 1, 0, 1): np.exp(-3) * np.exp(1) * np.exp(-8),  # * np.exp(-2),
            (0, 0, 0, 1, 0, 0, 2): np.exp(-3) * np.exp(-1) * np.exp(-2),  # * np.exp(2),
            (0, 1, 0): np.exp(-3) * np.exp(1),  # * np.exp(-8),
            (0, 1, 0, 1, 1, 0, 2): np.exp(-3),  # * np.exp(1),
            (1, 0, 0): np.exp(-3) * np.exp(-1),  # * np.exp(-2),
            (1, 0, 0, 1, 1, 0, 1): np.exp(-3),  # * np.exp(-1),
            (1, 1, 0): 1.0  # np.exp(-3)
        }
        expected_beta = {k: np.emath.log(v) for k, v in expected_beta.items()}

        start_states = [0]
        transitions = [(0, 0, (0, 1)),
                       (0, 0, (1, 0))]
        states_to_classes = {0: 'a'}
        n_states = 1

        state_machine = GeneralStateMachine(start_states, transitions, states_to_classes)

        test_model = _Model(state_machine, x, y)
        for s in test_model._lattice:
            print(s)
        x_dot_parameters = np.dot(x, parameters.T)  # Pre-compute the dot product
        actual_beta = test_model._backward(x_dot_parameters)
        print(actual_beta)

        print()
        self.assertEqual(len(actual_beta), len(expected_beta))
        for key in sorted(expected_beta.keys(), reverse=True):
            print(key, expected_beta[key], actual_beta[key])
            self.assertAlmostEqual(actual_beta[key], expected_beta[key])
Example No. 14
    def test_backward_connected(self):
        parameters = np.array(range(-3, 3), dtype='float64').reshape((3, 2))
        # parameters =
        #0 ([[-3, -2],
        #1   [-1,  0],
        #2   [ 1,  2]])
        x = np.array([[[0, 1], [2, 1]], [[0, 1], [1, 0]]])
        y = 'a'
        expected_beta = {
            (0, 0, 0): (np.exp(-4) + np.exp(-12)),  # * np.exp(-2),
            (0, 0, 0, 0, 1, 0, 1):
            np.exp(-3) * np.exp(1) * np.exp(-8),  # * np.exp(-2),
            (0, 0, 0, 1, 0, 0, 2):
            np.exp(-3) * np.exp(-1) * np.exp(-2),  # * np.exp(2),
            (0, 1, 0): np.exp(-3) * np.exp(1),  # * np.exp(-8),
            (0, 1, 0, 1, 1, 0, 2): np.exp(-3),  # * np.exp(1),
            (1, 0, 0): np.exp(-3) * np.exp(-1),  # * np.exp(-2),
            (1, 0, 0, 1, 1, 0, 1): np.exp(-3),  # * np.exp(-1),
            (1, 1, 0): 1.0  # np.exp(-3)
        }
        expected_beta = {k: np.emath.log(v) for k, v in expected_beta.items()}

        start_states = [0]
        transitions = [(0, 0, (0, 1)), (0, 0, (1, 0))]
        states_to_classes = {0: 'a'}
        n_states = 1

        state_machine = GeneralStateMachine(start_states, transitions,
                                            states_to_classes)

        test_model = _Model(state_machine, x, y)
        for s in test_model._lattice:
            print(s)
        x_dot_parameters = np.dot(x,
                                  parameters.T)  # Pre-compute the dot product
        actual_beta = test_model._backward(x_dot_parameters)
        print(actual_beta)

        print()
        self.assertEqual(len(actual_beta), len(expected_beta))
        for key in sorted(expected_beta.keys(), reverse=True):
            print(key, expected_beta[key], actual_beta[key])
            self.assertAlmostEqual(actual_beta[key], expected_beta[key])
Example No. 15
    def test_forward_backward_same_partition_value(self):
        classes = ['a', 'b']
        parameters = np.array(range(-8, 8), dtype='float64').reshape((8, 2))
        x = np.array([[[0, 1], [2, 1]], [[0, 1], [1, 0]]])
        y = 'a'
        state_machine = DefaultStateMachine(classes)
        test_model = _Model(state_machine, x, y)
        x_dot_parameters = np.dot(x,
                                  parameters.T)  # Pre-compute the dot product
        actual_alpha = test_model._forward(x_dot_parameters)
        actual_beta = test_model._backward(x_dot_parameters)

        print(actual_alpha[(1, 1, 0)], actual_beta[(0, 0, 0)])
        print(actual_alpha[(1, 1, 1)], actual_beta[(0, 0, 1)])
        self.assertAlmostEqual(
            actual_alpha[(1, 1, 0)],
            actual_beta[(0, 0, 0)] + (np.dot(x[0, 0, :], parameters[0, :])))
        self.assertAlmostEqual(
            actual_alpha[(1, 1, 1)],
            actual_beta[(0, 0, 1)] + (np.dot(x[0, 0, :], parameters[1, :])))
Example No. 16
    def test_forward_single(self):
        start_states = [0, 1]
        transitions = [(0, 0, (1, 1)), (0, 1, (0, 1)), (0, 0, (1, 0)),
                       (0, 2, lambda i, j, k: (0, 2))]
        states_to_classes = {0: 'a', 1: 'a', 2: 'b'}  # Dummy

        state_machine = GeneralStateMachine(start_states, transitions,
                                            states_to_classes)

        parameters = np.array(range(-7, 7), dtype='float64').reshape((7, 2))
        # parameters =
        # 0([[-7, -6],
        # 1  [-5, -4],
        # 2  [-3, -2],
        # 3  [-1,  0],
        # 4  [ 1,  2],
        # 5  [ 3,  4],
        # 6  [ 5,  6]])
        x = np.array([[[0, 1], [1, 0], [2, 1]], [[0, 1], [1, 0], [1, 0]]])
        y = 'a'
        # Expected lattice:
        #               #     ________
        # 1.  .  .      # 1  0  __0 - 21
        #               #    | /
        # 0.  .  .      # 0  0
        #  0  1  2      #    0    1    2
        expected_alpha = {
            (0, 0, 0):
            np.exp(-6),
            (0, 0, 0, 1, 0, 0, 5):
            np.exp(-6) * np.exp(4),
            (0, 0, 0, 1, 1, 0, 3):
            np.exp(-6) * np.exp(-1),
            (1, 0, 0):
            np.exp(-6) * np.exp(4) * np.exp(-6),
            (1, 0, 0, 1, 2, 2, 6):
            np.exp(-6) * np.exp(4) * np.exp(-6) * np.exp(5),
            (1, 1, 0):
            np.exp(-6) * np.exp(-1) * np.exp(-7),
            (1, 1, 0, 1, 2, 1, 4):
            np.exp(-6) * np.exp(-1) * np.exp(-7) * np.exp(1),
            (1, 2, 1):
            np.exp(-6) * np.exp(-1) * np.exp(-7) * np.exp(1) * np.exp(-5),
            (1, 2, 2):
            np.exp(-6) * np.exp(4) * np.exp(-6) * np.exp(5) * np.exp(-3)
        }
        expected_alpha = {
            k: np.emath.log(v)
            for k, v in expected_alpha.items()
        }
        test_model = _Model(state_machine, x, y)
        x_dot_parameters = np.dot(x,
                                  parameters.T)  # Pre-compute the dot product
        actual_alpha = test_model._forward(x_dot_parameters)

        actual_alpha = {
            k: v
            for k, v in actual_alpha.items() if not np.isneginf(v)
        }
        print(actual_alpha)

        self.assertEqual(len(actual_alpha), len(expected_alpha))
        print()
        for key in sorted(expected_alpha.keys()):
            print(key, (expected_alpha[key]), (actual_alpha[key]))
            self.assertEqual(actual_alpha[key], expected_alpha[key])