Example #1
0
def test_feature_map_derivative2_d():
    """Check the second derivative of the feature map w.r.t. one input
    dimension against the closed form -phi(x) * omega_d**2 on a 1-D example."""
    X = np.array([[1.]])
    u = np.array([2.])
    omega = np.array([[2.]])
    d = 0

    computed = feature_map_derivative2_d(X, omega, u, d)
    expected = -feature_map(X, omega, u) * (omega[:, d] ** 2)
    assert_close(computed, expected)
Example #2
0
def predict(x, alphas):
    """Evaluate the random-feature expansion sum_i alphas[i] * phi_i(x).

    The i-th basis (omega, u) is re-drawn deterministically by re-seeding
    the global RNG with seed_offset + i, matching how it was drawn when
    the corresponding coefficient was fitted.
    """
    D = len(x)
    f = 0.
    for i, alpha in enumerate(alphas):
        # deterministic re-draw of the i-th random basis
        np.random.seed(seed_offset + i)
        omega, u = sample_basis(D=D, m=1, gamma=gamma)
        f += alpha * feature_map(x, omega, u)

    return f
Example #3
0
def test_feature_map_single_equals_feature_map():
    """Batched feature map must match the single-sample version row by row."""
    num_points, dim, num_features = 10, 20, 3
    X = np.random.randn(num_points, dim)
    omega = np.random.randn(dim, num_features)
    u = np.random.uniform(0, 2 * np.pi, num_features)

    batched = feature_map(X, omega, u)

    for row, x in zip(batched, X):
        assert_allclose(row, feature_map_single(x, omega, u))
Example #4
0
def test_feature_map_equals_scikit_learn():
    """Our feature map must reproduce sklearn's RBFSampler transform when
    both draw the identical random basis (same seed, same scaling)."""
    sigma = 2.
    gamma = sigma ** 2

    num_points, dim, num_features = 10, 20, 3
    X = np.random.randn(num_points, dim)

    np.random.seed(1)
    omega = sigma * np.random.randn(dim, num_features)
    u = np.random.uniform(0, 2 * np.pi, num_features)

    # reset the seed so the sampler draws exactly the same basis
    np.random.seed(1)
    rbf_sampler = RBFSampler(gamma, num_features, random_state=1)
    rbf_sampler.fit(X)
    assert_allclose(rbf_sampler.random_weights_, omega)
    assert_allclose(rbf_sampler.random_offset_, u)

    assert_allclose(rbf_sampler.transform(X), feature_map(X, omega, u))
Example #5
0
    # estimate density in rkhs
    # Draw N samples from a D-dimensional Student-t, then fit an
    # exponential-family density model in a random-feature RKHS
    # via score matching.
    N = 800
    mu = np.zeros(D)
    Z = np.random.standard_t(df=nu, size=(N, D))
    lmbda = 0.0001  # score-matching regularizer
    sigma = 0.5
    gamma = 0.5 * (sigma**2)
    m = N  # one random feature per sample

    # NOTE(review): elsewhere in this file the basis is scaled by sigma,
    # not gamma (see test_feature_map_equals_scikit_learn) — confirm that
    # scaling omega by gamma is intended here.
    omega = gamma * np.random.randn(D, m)
    u = np.random.uniform(0, 2 * np.pi, m)
    logger.info("Estimating density")
    theta = score_matching_sym(Z, lmbda, omega, u)

    # fitted log-density estimate and its gradient at a single point x
    logq_est = lambda x: log_pdf_estimate(feature_map(x, omega, u), theta)
    dlogq_est = lambda x: log_pdf_estimate_grad(
        feature_map_grad_single(x, omega, u), theta)

    # momentum distribution: standard Gaussian, passed via its Cholesky factor
    Sigma_p = np.eye(D)
    L_p = np.linalg.cholesky(Sigma_p)
    logp = lambda x: log_gaussian_pdf(
        x, Sigma=L_p, compute_grad=False, is_cholesky=True)
    dlogp = lambda x: log_gaussian_pdf(
        x, Sigma=L_p, compute_grad=True, is_cholesky=True)
    p_sample = lambda: sample_gaussian(
        N=1, mu=np.zeros(D), Sigma=L_p, is_cholesky=True)[0]

    # starting state
    p0 = p_sample()
Example #6
0
        # deterministic re-draw of the i-th random basis
        # (the RNG is re-seeded just above this fragment)
        omega, u = sample_basis(D=D, m=1, gamma=gamma)
        phi_x = feature_map(x, omega, u)
        # accumulate the weighted random feature into the prediction
        f += alphas[i] * phi_x

    return f


# Stochastic fit of the random-feature coefficients: one data point and
# one freshly drawn basis per iteration.
alphas = np.zeros(num_iterations)
for i in range(num_iterations):
    logger.info("Iteration %d" % i)
    x = X[i]

    # sample random feature
    # re-seeding with seed_offset + i makes this basis reproducible
    # inside predict(), which re-draws it the same way
    np.random.seed(seed_offset + i)
    omega, u = sample_basis(D=D, m=1, gamma=gamma)
    phi_x = feature_map(x, omega, u)

    # sample data point and predict
    # NOTE(review): the prediction of the current model at x is scaled by
    # phi_x here — confirm the extra phi_x factor is intended rather than
    # predict(x, alphas[:i]) alone.
    f = predict(x, alphas[:i]) * phi_x

    # gradient of f at x
    f_grad = feature_map_grad_single(x, omega, u) * f

    # gradient
    # accumulate the (score-matching-style) gradient over input dimensions:
    # first derivative weighted by f_grad[d] plus the second derivative
    grad = 0
    for d in range(D):
        phi_derivative_d = feature_map_derivative_d(x, omega, u, d)
        phi_derivative2_d = feature_map_derivative2_d(x, omega, u, d)

        grad += phi_derivative_d * f_grad[d] + phi_derivative2_d