Example #1
def test_basic_batch_equality():
    """
    Test basic batch equality specification.
    """
    dims = [4, 8]
    for dim in dims:
        block_dim = dim // 2
        # Generate random configurations
        A = np.random.rand(block_dim, block_dim)
        B = np.random.rand(block_dim, block_dim)
        B = np.dot(B.T, B)
        D = np.random.rand(block_dim, block_dim)
        D = np.dot(D.T, D)
        tr_B_D = np.trace(B) + np.trace(D)
        B = B / tr_B_D
        D = D / tr_B_D
        As, bs, Cs, ds, Fs, gradFs, Gs, gradGs = \
                basic_batch_equality(dim, A, B, D)
        tol = 1e-3
        eps = 1e-4
        N_rand = 10
        for (g, gradg) in zip(Gs, gradGs):
            for i in range(N_rand):
                X = np.random.rand(dim, dim)
                val = g(X)
                grad = gradg(X)
                print "grad:\n", grad
                num_grad = numerical_derivative(g, X, eps)
                print "num_grad:\n", num_grad
                assert np.sum(np.abs(grad - num_grad)) < tol
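
Every example in this listing checks an analytic gradient against numerical_derivative(f, X, eps), which is imported from the module under test and not shown in these excerpts. For reference, a minimal central-difference sketch with the same call signature (an assumption about the helper, not its actual implementation) could be:

import numpy as np

def numerical_derivative(f, X, eps):
    """Central-difference estimate of the gradient of scalar-valued f at X."""
    num_grad = np.zeros(np.shape(X))
    for i in range(np.shape(X)[0]):
        for j in range(np.shape(X)[1]):
            # Perturb one entry of X at a time by +/- eps.
            E = np.zeros(np.shape(X))
            E[i, j] = eps
            num_grad[i, j] = (f(X + E) - f(X - E)) / (2 * eps)
    return num_grad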
Example #2
def test_Q_constraints():
    import pdb, traceback, sys
    try:
        dims = [4, 8]
        N_rand = 10
        eps = 1e-4
        tol = 1e-3
        for dim in dims:
            block_dim = dim // 4
            # Generate initial data
            D = np.eye(block_dim)
            Dinv = np.linalg.inv(D)
            B = np.eye(block_dim)
            A = 0.5 * np.eye(block_dim)
            c = 0.5
            As, bs, Cs, ds, Fs, gradFs, Gs, gradGs = \
                    Q_constraints(block_dim, A, B, D, c)
            for (g, gradg) in zip(Gs, gradGs):
                for i in range(N_rand):
                    X = np.random.rand(dim, dim)
                    val = g(X)
                    grad = gradg(X)
                    print "grad:\n", grad
                    num_grad = numerical_derivative(g, X, eps)
                    print "num_grad:\n", num_grad
                    assert np.sum(np.abs(grad - num_grad)) < tol
    except Exception:
        _, _, tb = sys.exc_info()
        traceback.print_exc()
        pdb.post_mortem(tb)
Example #3
def test_A_constraints():
    import pdb, traceback, sys
    try:
        dims = [4, 8]
        N_rand = 10
        eps = 1e-4
        tol = 1e-3
        np.set_printoptions(precision=3)
        for dim in dims:
            block_dim = dim // 4
            # Generate initial data
            D = np.eye(block_dim)
            Dinv = np.linalg.inv(D)
            Q = 0.5*np.eye(block_dim)
            mu = np.ones((block_dim, 1))
            As, bs, Cs, ds, Fs, gradFs, Gs, gradGs = \
                    A_constraints(block_dim, D, Dinv, Q, mu)
            for (g, gradg) in zip(Gs, gradGs):
                for i in range(N_rand):
                    X = np.random.rand(dim, dim)
                    val = g(X)
                    grad = gradg(X)
                    print "grad:\n", grad
                    num_grad = numerical_derivative(g, X, eps)
                    print "num_grad:\n", num_grad
                    assert np.sum(np.abs(grad - num_grad)) < tol
    except Exception:
        _, _, tb = sys.exc_info()
        traceback.print_exc()
        pdb.post_mortem(tb)
Example #4
def test_A_dynamics():
    dims = [4, 8]
    N_rand = 10
    tol = 1e-3
    eps = 1e-4
    for dim in dims:
        block_dim = dim // 4
        (D_Q_cds, Dinv_cds, I_1_cds, I_2_cds,
            A_1_cds, A_T_1_cds, A_2_cds, A_T_2_cds) = A_coords(dim)

        # Generate initial data
        D = np.eye(block_dim)
        Q = 0.5*np.eye(block_dim)
        Qinv = np.linalg.inv(Q)
        C = 2*np.eye(block_dim)
        B = np.eye(block_dim)
        E = np.eye(block_dim)
        def obj(X):
            return A_dynamics(X, block_dim, C, B, E, Qinv)
        def grad_obj(X):
            return grad_A_dynamics(X, block_dim, C, B, E, Qinv)
        for i in range(N_rand):
            X = np.random.rand(dim, dim)
            val = obj(X)
            grad = grad_obj(X)
            num_grad = numerical_derivative(obj, X, eps)
            diff = np.sum(np.abs(grad - num_grad))
            print "X:\n", X
            print "grad:\n", grad
            print "num_grad:\n", num_grad
            print "diff: ", diff
            assert diff < tol
Example #5
def test_log_det():
    dims = [4]
    N_rand = 10
    tol = 1e-3
    eps = 1e-4
    for dim in dims:
        block_dim = dim // 4
        (D_ADA_T_cds, I_1_cds, I_2_cds, R_1_cds, 
            D_cds, c_I_1_cds, c_I_2_cds, R_2_cds) = \
                Q_coords(block_dim)
        # Generate initial data
        B = np.random.rand(block_dim, block_dim)
        def obj(X):
            return log_det_tr(X, B)
        def grad_obj(X):
            return grad_log_det_tr(X, B)
        for i in range(N_rand):
            X = np.random.rand(dim, dim)
            R1 = get_entries(X, R_1_cds)
            R2 = get_entries(X, R_2_cds)
            if (np.linalg.det(R1) <= 0 or np.linalg.det(R2) <= 0):
                "Continue!"
                continue
            val = obj(X)
            grad = grad_obj(X)
            num_grad = numerical_derivative(obj, X, eps)
            diff = np.sum(np.abs(grad - num_grad))
            if diff >= tol:
                print "grad:\n", grad
                print "num_grad:\n", num_grad
                print "diff: ", diff
            assert diff < tol
Example #6
def test7():
    """
    BROKEN: Check log-sum-exp gradient on many linear and nonlinear
    inequalities and equalities.
    """
    tol = 1e-3
    eps = 1e-4
    N_rand = 10
    dims = [16]
    for dim in dims:
        As, bs, Cs, ds, Fs, gradFs, Gs, gradGs = \
                stress_inequalities_and_equalities(dim)
        M = compute_scale(len(As), len(Cs), len(Fs), len(Gs), tol)
        def f(X):
            return log_sum_exp_penalty(X, M, As, bs, Cs, ds, Fs, Gs)
        def gradf(X):
            return log_sum_exp_grad_penalty(X, M, As,
                        bs, Cs, ds, Fs, gradFs, Gs, gradGs)
        for i in range(N_rand):
            X = np.random.rand(dim, dim)
            val = f(X)
            grad = gradf(X)
            print "grad:\n", grad
            num_grad = numerical_derivative(f, X, eps)
            print "num_grad:\n", num_grad
            diff = np.sum(np.abs(grad - num_grad))
            print "diff: ", diff
            assert diff < tol
Example #7
def test_tr():
    dims = [1, 5, 10]
    N_rand = 10
    tol = 1e-3
    eps = 1e-4
    for dim in dims:
        for i in range(N_rand):
            X = np.random.rand(dim, dim)
            val = trace_obj(X)
            grad = grad_trace_obj(X)
            num_grad = numerical_derivative(trace_obj, X, eps)
            assert np.sum(np.abs(grad - num_grad)) < tol
Example #8
def test_sum_squares():
    dims = [1, 5, 10]
    N_rand = 10
    tol = 1e-3
    eps = 1e-4
    for dim in dims:
        for i in range(N_rand):
            X = np.random.rand(dim, dim)
            val = neg_sum_squares(X)
            grad = grad_neg_sum_squares(X)
            num_grad = numerical_derivative(neg_sum_squares, X, eps)
            assert np.sum(np.abs(grad - num_grad)) < tol
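
trace_obj, grad_trace_obj, neg_sum_squares, and grad_neg_sum_squares in the two tests above come from the module under test. Judging only by their names, a set of objective/gradient definitions that would pass these checks (a sketch, not the module's actual code) is:

import numpy as np

def trace_obj(X):
    # f(X) = Tr(X)
    return np.trace(X)

def grad_trace_obj(X):
    # dTr(X)/dX = I
    return np.eye(np.shape(X)[0])

def neg_sum_squares(X):
    # f(X) = -sum_ij X_ij**2
    return -np.sum(X ** 2)

def grad_neg_sum_squares(X):
    # df/dX = -2*X
    return -2 * X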
Example #9
def test_quadratic_inequality():
    """
    Test quadratic inequality specification.
    """
    dim, As, bs, Cs, ds, Fs, gradFs, Gs, gradGs = \
            quadratic_inequality()
    tol = 1e-3
    eps = 1e-4
    N_rand = 10
    for (f, gradf) in zip(Fs, gradFs):
        for i in range(N_rand):
            X = np.random.rand(dim, dim)
            val = f(X)
            grad = gradf(X)
            print "grad:\n", grad
            num_grad = numerical_derivative(f, X, eps)
            print "num_grad:\n", num_grad
            assert np.sum(np.abs(grad - num_grad)) < tol
Example #10
def test_l2_batch_equals():
    """
    Test l2_batch_equals operation.
    """
    dims = [4, 16]
    N_rand = 10
    eps = 1e-4
    tol = 1e-3
    for dim in dims:
        block_dim = dim // 2
        A = np.random.rand(block_dim, block_dim)
        A = np.dot(A.T, A)
        coord = (0, block_dim, 0, block_dim)
        def f(X):
            return l2_batch_equals(X, A, coord)
        def gradf(X):
            return grad_l2_batch_equals(X, A, coord)
        for i in range(N_rand):
            X = np.random.rand(dim, dim)
            val = f(X)
            grad = gradf(X)
            num_grad = numerical_derivative(f, X, eps)
            assert np.sum(np.abs(grad - num_grad)) < tol
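
Here coord = (row_start, row_end, col_start, col_end) selects a block of X, and l2_batch_equals penalizes that block's deviation from A. A plausible sketch of the pair (an assumption based on the names and the coord convention, not the module's actual code):

import numpy as np

def l2_batch_equals(X, A, coord):
    # ||X[block] - A||_F^2 on the block picked out by coord.
    (r0, r1, c0, c1) = coord
    return np.sum((X[r0:r1, c0:c1] - A) ** 2)

def grad_l2_batch_equals(X, A, coord):
    # Gradient is 2*(X[block] - A) on the block, zero elsewhere.
    (r0, r1, c0, c1) = coord
    grad = np.zeros(np.shape(X))
    grad[r0:r1, c0:c1] = 2 * (X[r0:r1, c0:c1] - A)
    return grad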
Example #11
def test4():
    """
    Check log-sum-exp gradient on quadratic equality problem.
    """
    dim, As, bs, Cs, ds, Fs, gradFs, Gs, gradGs = \
            quadratic_equality()
    tol = 1e-3
    M = compute_scale(len(As), len(Cs), len(Fs), len(Gs), tol)
    def f(X):
        return log_sum_exp_penalty(X, M, As, bs, Cs, ds, Fs, Gs)
    def gradf(X):
        return log_sum_exp_grad_penalty(X, M, As,
                    bs, Cs, ds, Fs, gradFs, Gs, gradGs)
    eps = 1e-4
    N_rand = 10
    for i in range(N_rand):
        X = np.random.rand(dim, dim)
        val = f(X)
        grad = gradf(X)
        print "grad:\n", grad
        num_grad = numerical_derivative(f, X, eps)
        print "num_grad:\n", num_grad
        assert np.sum(np.abs(grad - num_grad)) < tol
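
Examples #6 and #11 above (and #12 below) all exercise log_sum_exp_penalty and log_sum_exp_grad_penalty, which are imported from the module under test. For orientation only, a smooth-max penalty with the same signature might look like the sketch below; the residual conventions and the role of the scale M are assumptions, and the module's actual definitions may differ:

import numpy as np

def log_sum_exp_penalty(X, M, As, bs, Cs, ds, Fs, Gs):
    # Smooth maximum (1/M) * log(sum_i exp(M * r_i)) over constraint residuals.
    rs = []
    for (A, b) in zip(As, bs):
        rs.append(np.trace(np.dot(A, X)) - b)          # Tr(A X) <= b
    for (C, d) in zip(Cs, ds):
        rs.append(np.abs(np.trace(np.dot(C, X)) - d))  # Tr(C X) == d
    for f in Fs:
        rs.append(f(X))                                # f(X) <= 0
    for g in Gs:
        rs.append(np.abs(g(X)))                        # g(X) == 0
    rs = np.array(rs)
    # Shift by the max for numerical stability before exponentiating.
    m = np.max(M * rs)
    return (m + np.log(np.sum(np.exp(M * rs - m)))) / M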
Example #12
def test8():
    """
    Check log-sum-exp gradient on basic batch equalities.
    """
    tol = 1e-3
    eps = 1e-5
    N_rand = 10
    dims = [16]
    for dim in dims:
        block_dim = dim // 2
        # Generate random configurations
        A = np.random.rand(block_dim, block_dim)
        B = np.random.rand(block_dim, block_dim)
        B = np.dot(B.T, B)
        D = np.random.rand(block_dim, block_dim)
        D = np.dot(D.T, D)
        tr_B_D = np.trace(B) + np.trace(D)
        B = B / tr_B_D
        D = D / tr_B_D
        As, bs, Cs, ds, Fs, gradFs, Gs, gradGs = \
                basic_batch_equality(dim, A, B, D)
        M = compute_scale(len(As), len(Cs), len(Fs), len(Gs), tol)
        def f(X):
            return log_sum_exp_penalty(X, M, As, bs, Cs, ds, Fs, Gs)
        def gradf(X):
            return log_sum_exp_grad_penalty(X, M, As,
                        bs, Cs, ds, Fs, gradFs, Gs, gradGs)
        for i in range(N_rand):
            X = np.random.rand(dim, dim)
            val = f(X)
            grad = gradf(X)
            print "grad:\n", grad
            num_grad = numerical_derivative(f, X, eps)
            print "num_grad:\n", num_grad
            diff = np.sum(np.abs(grad - num_grad))
            print "diff: ", diff
            assert diff < tol