Пример #1
0
def test_rff_feature_map_derivatives_equals_feature_map_derivatives_loop():
    """Vectorised RFF feature-map gradient must match the loop reference."""
    num_points, num_dims, num_features = 10, 20, 3
    X = np.random.randn(num_points, num_dims)
    omega = np.random.randn(num_dims, num_features)
    u = np.random.uniform(0, 2 * np.pi, num_features)

    vectorised = rff_feature_map_grad(X, omega, u)
    looped = rff_feature_map_grad_loop(X, omega, u)

    assert_allclose(looped, vectorised)
Пример #2
0
def test_rff_feature_map_derivatives_equals_feature_map_derivatives_loop():
    """The fast gradient implementation agrees with the naive loop version."""
    n, d, m = 10, 20, 3

    X = np.random.randn(n, d)
    omega = np.random.randn(d, m)
    u = np.random.uniform(0, 2 * np.pi, m)

    fast_grad = rff_feature_map_grad(X, omega, u)
    slow_grad = rff_feature_map_grad_loop(X, omega, u)

    assert_allclose(slow_grad, fast_grad)
Пример #3
0
def test_rff_feature_map_grad_theano_result_equals_manual():
    """Theano per-component gradients must agree with the manual gradient."""
    if not theano_available:
        raise SkipTest("Theano not available.")

    num_dims = 2
    x = np.random.randn(num_dims)
    X = x[np.newaxis, :]
    num_features = 10
    sigma = 1.
    omega, u = rff_sample_basis(num_dims, num_features, sigma)
    grad_manual = rff_feature_map_grad(X, omega, u)[:, 0, :]

    # grad_manual is a Monte Carlo average, so each per-component Theano
    # gradient has to be normalised by sqrt(m) before comparison.
    normaliser = np.sqrt(num_features)
    for i in range(num_features):
        grad = rff_feature_map_comp_grad_theano(x, omega[:, i], u[i]) / normaliser
        assert_close(grad, grad_manual[:, i])
Пример #4
0
def test_rff_feature_map_grad_theano_result_equals_manual():
    """Each Theano component gradient matches the manually-computed one."""
    if not theano_available:
        raise SkipTest("Theano not available.")

    D, m, sigma = 2, 10, 1.
    x = np.random.randn(D)
    X = x[np.newaxis, :]
    omega, u = rff_sample_basis(D, m, sigma)
    grad_manual = rff_feature_map_grad(X, omega, u)[:, 0, :]

    for i in range(m):
        # the manual gradient is a Monte Carlo average, hence the
        # sqrt(m) normalisation of the single-component Theano result
        theano_grad = rff_feature_map_comp_grad_theano(x, omega[:, i], u[i])
        assert_close(theano_grad / np.sqrt(m), grad_manual[:, i])
Пример #5
0
def compute_C_memory(X, omega, u):
    """Average outer products of RFF feature-map gradients over the data.

    Computes C = (1/N) * sum over all N points and all d input dimensions
    of outer(phi2, phi2), where phi2 are the feature-map gradients.

    Parameters
    ----------
    X : ndarray, shape (N, d)
        Data points, one per row.
    omega : ndarray
        Random Fourier feature frequencies (as consumed by
        rff_feature_map_grad).
    u : ndarray
        Random Fourier feature phases.

    Returns
    -------
    ndarray, shape (m, m)
        The averaged outer-product matrix, where m is the number of
        random features.
    """
    assert len(X.shape) == 2
    Phi2 = rff_feature_map_grad(X, omega, u)
    d = X.shape[1]
    N = X.shape[0]
    m = Phi2.shape[2]

    # tensordot over the flattened (N*d) axis accumulates all outer
    # products in one multicore BLAS call; previously benchmarked as
    # faster than an explicit loop, einsum, or a Cython helper.
    Phi2_reshaped = Phi2.reshape(N * d, m)
    C = np.tensordot(Phi2_reshaped, Phi2_reshaped, [0, 0])

    return C / N
Пример #6
0
def compute_C_memory(X, omega, u):
    """Average outer products of RFF feature-map gradients over the data.

    Computes C = (1/N) * sum over all N points and all d input dimensions
    of outer(phi2, phi2), where phi2 are the feature-map gradients.

    Parameters
    ----------
    X : ndarray, shape (N, d)
        Data points, one per row.
    omega : ndarray
        Random Fourier feature frequencies (as consumed by
        rff_feature_map_grad).
    u : ndarray
        Random Fourier feature phases.

    Returns
    -------
    ndarray, shape (m, m)
        The averaged outer-product matrix, where m is the number of
        random features.
    """
    assert len(X.shape) == 2
    Phi2 = rff_feature_map_grad(X, omega, u)
    d = X.shape[1]
    N = X.shape[0]
    m = Phi2.shape[2]

    # tensordot over the flattened (N*d) axis accumulates all outer
    # products in one multicore BLAS call; previously benchmarked as
    # faster than an explicit loop, einsum, or a Cython helper.
    Phi2_reshaped = Phi2.reshape(N * d, m)
    C = np.tensordot(Phi2_reshaped, Phi2_reshaped, [0, 0])

    return C / N