Example #1
def Bound2(phi_0, phi_1, phi_2, sigma_noise, K_mm, mean_y):
    # Preliminary bound assembled from the psi statistics
    # (phi_0 scalar, phi_1 N x M, phi_2 M x M) and K_mm.
    beta = 1 / tf.square(sigma_noise)  # noise precision
    bound = 0
    N = h.get_dim(mean_y, 0)
    M = h.get_dim(K_mm, 0)
    W_inv_part = beta * phi_2 + K_mm
    global phi_200  # exposed for inspection outside the graph-building code
    phi_200 = tf.matrix_solve(W_inv_part, tf.transpose(phi_1))
    # W = beta*I - beta^2 * phi_1 (beta*phi_2 + K_mm)^{-1} phi_1^T
    W = beta * np.eye(N) - tf.square(beta) * h.Mul(
        phi_1, tf.matrix_solve(W_inv_part, tf.transpose(phi_1)))
    # Log-determinant, quadratic and trace terms
    bound += N * tf.log(beta)
    bound += h.log_det(K_mm + 1e-3 * np.eye(M))  # jitter for stability
    bound -= h.Mul(tf.transpose(mean_y), W, mean_y)
    global matrix_determinant  # placeholder kept for external monitoring
    matrix_determinant = tf.ones(1)
    bound -= h.log_det(W_inv_part +
                       1e-3 * tf.reduce_mean(W_inv_part) * np.eye(M))
    bound -= beta * phi_0
    bound += beta * tf.trace(tf.cholesky_solve(tf.cholesky(K_mm), phi_2))
    return 0.5 * bound
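Bound2 appears to assemble a collapsed variational lower bound from psi statistics (phi_0 scalar, phi_1 N x M, phi_2 M x M). A minimal driver sketch, assuming TensorFlow 1.x and that the repo's helper module is importable as `helper` (a hypothetical path; only the `h.get_dim`/`h.Mul` usage is confirmed by these examples); shapes and inputs are illustrative:

import numpy as np
import tensorflow as tf
import helper as h  # hypothetical import path for the repo's helpers

N, M = 6, 3
rng = np.random.RandomState(0)
X, Z = rng.randn(N, 1), rng.randn(M, 1)
# A PSD stand-in for K_mm and plausible psi statistics.
K_mm = tf.constant(np.exp(-0.5 * (Z - Z.T) ** 2) + 1e-6 * np.eye(M),
                   tf.float32)
phi_1_np = np.exp(-0.5 * (X - Z.T) ** 2)
phi_0 = tf.constant(float(N))                            # scalar
phi_1 = tf.constant(phi_1_np, tf.float32)                # N x M
phi_2 = tf.constant(phi_1_np.T @ phi_1_np, tf.float32)   # M x M, PSD
mean_y = tf.constant(rng.randn(N, 1), tf.float32)

lower_bound = Bound2(phi_0, phi_1, phi_2, tf.constant(0.1), K_mm, mean_y)
with tf.Session() as sess:
    print(sess.run(lower_bound))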
Example #2
def build_psi_stats_rbf(Z, noise_2, lenSc, mu, S):
    # Psi statistics of the RBF kernel under a Gaussian q(X) with
    # mean mu (N x Q) and diagonal variance S (N x Q); Z is M x Q.
    # Check consistent shapes: mu and S must agree along axis 0.
    assert h.get_dim(mu, 0) == h.get_dim(S, 0)

    var = noise_2                      # kernel variance
    lengthscale2 = tf.square(lenSc)    # squared lengthscale per dimension

    # psi0 = sum_n E[k(x_n, x_n)] = N * var
    N = tf.shape(mu)[0]
    psi0 = tf.cast(N, tf.float32) * var

    # psi1[n, m] = E[k(x_n, z_m)]
    psi1_logdenom = tf.expand_dims(
        tf.reduce_sum(tf.log(S / lengthscale2 + 1.), 1), 1)  # N x 1
    d = tf.square(tf.expand_dims(mu, 1) - tf.expand_dims(Z, 0))  # N x M x Q
    psi1_log = -0.5 * (psi1_logdenom + tf.reduce_sum(
        d / tf.expand_dims(S + lengthscale2, 1), 2))
    psi1 = var * tf.exp(psi1_log)

    # psi2[m, m'] = sum_n E[k(x_n, z_m) k(x_n, z_m')]
    psi2_logdenom = -0.5 * tf.expand_dims(
        tf.reduce_sum(tf.log(2. * S / lengthscale2 + 1.), 1), 1)  # N x 1
    psi2_logdenom = tf.expand_dims(psi2_logdenom, 1)
    psi2_exp1 = 0.25 * tf.reduce_sum(
        tf.square(tf.expand_dims(Z, 1) - tf.expand_dims(Z, 0)) / lengthscale2,
        2)  # M x M
    psi2_exp1 = tf.expand_dims(psi2_exp1, 0)

    Z_hat = 0.5 * (tf.expand_dims(Z, 1) + tf.expand_dims(Z, 0))  # M x M x Q
    denom = 1. / (2. * S + lengthscale2)
    a = tf.expand_dims(
        tf.expand_dims(tf.reduce_sum(tf.square(mu) * denom, 1), 1),
        1)  # N x 1 x 1
    b = tf.reduce_sum(
        tf.expand_dims(tf.expand_dims(denom, 1), 1) * tf.square(Z_hat),
        3)  # N x M x M
    c = -2 * tf.reduce_sum(
        tf.expand_dims(tf.expand_dims(mu * denom, 1), 1) * Z_hat,
        3)  # N x M x M
    psi2_exp2 = a + b + c

    psi2 = tf.square(var) * tf.reduce_sum(
        tf.exp(psi2_logdenom - psi2_exp1 - psi2_exp2), 0)
    return psi0, psi1, psi2
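The commented-out scratch block that originally sat in this function exercised it with random inputs; here is a cleaned-up version of that smoke test, assuming TensorFlow 1.x and the repo's helper module `h` on the path (the function casts through float32, so the scratch block's float64 constants are switched accordingly):

import tensorflow as tf

Q, N, M = 3, 4, 2
mu = tf.random_uniform([N, Q], minval=0, maxval=10, dtype=tf.float32)
S = tf.random_uniform([N, Q], minval=0, maxval=10, dtype=tf.float32)
Z = tf.random_uniform([M, Q], minval=0, maxval=10, dtype=tf.float32)
noise_2 = tf.constant(1.0)                   # kernel variance
lenSc = tf.constant([3.0, 2.0, 1.0])         # one lengthscale per dimension

psi0, psi1, psi2 = build_psi_stats_rbf(Z, noise_2, lenSc, mu, S)
with tf.Session() as sess:
    p0, p1, p2 = sess.run([psi0, psi1, psi2])
    print(p0)           # scalar: N * var
    print(p1.shape)     # (N, M)
    print(p2.shape)     # (M, M)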
Example #3
def predict(K_mn, sigma, K_mm, K_nn, Ytr):
    # Predictions; M and K_mnnm_2 are module-level globals defined in
    # the enclosing script.
    N = h.get_dim(K_nn, 0)
    K_nm = tf.transpose(K_mn)
    Sig_Inv = 1e-1 * np.eye(M) + K_mm + K_mnnm_2 / tf.square(sigma)
    mu_post = h.Mul(tf.matrix_solve(Sig_Inv, K_mn), Ytr) / tf.square(sigma)
    mean = h.Mul(K_nm, mu_post)
    variance = K_nn - h.Mul(K_nm, h.safe_chol(K_mm, K_mn)) + h.Mul(
        K_nm, tf.matrix_solve(Sig_Inv, K_mn))
    # Two-standard-deviation error bars, including observation noise
    var_terms = 2 * tf.sqrt(
        tf.reshape(tf.diag_part(variance) + tf.square(sigma), [N, 1]))
    return mean, var_terms
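predict relies on two names from the enclosing script, M and K_mnnm_2. A hedged setup sketch, assuming K_mnnm_2 = K_mn K_nm (our reading of the name, not confirmed by the repo) and a toy RBF kernel:

import numpy as np
import tensorflow as tf

M, N = 3, 8
rng = np.random.RandomState(1)
Xtr, Z = rng.randn(N, 1), rng.randn(M, 1)
k = lambda A, B: np.exp(-0.5 * (A - B.T) ** 2)        # toy RBF kernel
K_mm = tf.constant(k(Z, Z) + 1e-6 * np.eye(M), tf.float32)
K_mn = tf.constant(k(Z, Xtr), tf.float32)
K_nn = tf.constant(k(Xtr, Xtr), tf.float32)
K_mnnm_2 = tf.matmul(K_mn, K_mn, transpose_b=True)    # assumed definition
Ytr = tf.constant(rng.randn(N, 1), tf.float32)

mean, var_terms = predict(K_mn, tf.constant(0.1), K_mm, K_nn, Ytr)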
Example #4
def Bound1(y, S, Kmm, Knm, Tr_Knn, sigma):
    # Matrices to be used
    Kmm_chol = tf.cholesky(Kmm)
    sig_2 = tf.square(sigma)
    N = h.get_dim(y, 0)
    # Nystrom approximation Q_nn = Knm Kmm^{-1} Kmn
    Q_nn = h.Mul(Knm, tf.cholesky_solve(Kmm_chol, tf.transpose(Knm)))
    Q_I_chol = tf.cholesky(sig_2 * np.eye(N) + Q_nn)
    # Trace correction tr(K_nn - Q_nn) / (2 sigma^2)
    bound = -0.5 * (Tr_Knn - tf.trace(Q_nn)) / sig_2
    # Log marginal likelihood of y under N(0, Q_nn + sigma^2 I)
    bound += h.multivariate_normal(y, tf.zeros([N, 1], dtype=tf.float32),
                                   Q_I_chol)
    # Penalty on the variational variances S
    bound -= 0.5 * tf.reduce_sum(S) / sig_2 + 0.1 * 0.5 * tf.reduce_sum(
        tf.log(S))
    return bound
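In standard sparse-GP notation, our reading of Bound1 is a Titsias-style collapsed bound with an extra penalty on the variational variances S:

\mathcal{L} = \log \mathcal{N}\!\left(y \mid 0,\; Q_{nn} + \sigma^2 I\right) - \frac{1}{2\sigma^2}\operatorname{tr}\!\left(K_{nn} - Q_{nn}\right) - \frac{1}{2\sigma^2}\sum_i S_i - \frac{0.1}{2}\sum_i \log S_i, \qquad Q_{nn} = K_{nm} K_{mm}^{-1} K_{mn}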
Example #5
File: dgp3.py  Project: blutooth/gp
def sample(mu, Sigma):
    # Draw one sample from N(mu, Sigma); the 1e-1 * I jitter keeps the
    # Cholesky factorisation numerically stable.
    N = h.get_dim(mu, 0)
    rand = h.Mul(tf.cholesky(Sigma + 1e-1 * np.eye(N)),
                 tf.random_normal([N, 1])) + mu
    return rand
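A quick smoke test for sample, assuming TensorFlow 1.x and the repo's helper module `h` on the path (the 2 x 2 covariance is illustrative; note the draw includes the 1e-1 jitter):

import tensorflow as tf

mu = tf.constant([[0.0], [1.0]])
Sigma = tf.constant([[1.0, 0.5], [0.5, 1.0]])
with tf.Session() as sess:
    print(sess.run(sample(mu, Sigma)))   # one 2 x 1 draw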
Example #6
File: simpleopt.py  Project: blutooth/gp
def test_get_dim(self):
    # get_dim should report the static size of the requested axis.
    A1 = np.array([[1, 2], [1, 5], [3, 4]])
    A = tf.constant(A1, dtype=tf.float32)
    self.assertEqual(h.get_dim(A, 0), 3)
    self.assertEqual(h.get_dim(A, 1), 2)
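Given the assertions above, a plausible reconstruction of h.get_dim (ours, not the repo's actual source) simply reads the tensor's static shape:

def get_dim(tensor, index):
    # Static size of `tensor` along axis `index` (TensorFlow 1.x shape API).
    return tensor.get_shape()[index].value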