def test_lin_gradient():
    r'''
    Tests the gradient for the linear covariance function
    LIN :math:`= \sigma^2 (x x^\prime)`
    '''
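    # Analytic reference: d LIN / d sigma = 2 * sigma * x * x'. With
    # sigma = 2.1 and the scalar test points x = 1.3, x' = -2.0 (the inputs
    # apply_cov_scalar is assumed to evaluate at), this gives
    # 2 * 2.1 * 1.3 * (-2).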
    # df/dsigma
    f = covs.makeLinear(2.1)
    dfdsigma = f.stuff['derivatives'][0]
    expected_d_sigma = 2 * 2.1 * 1.3 * (-2)
    computed_d_sigma = apply_cov_scalar(dfdsigma)

    assert not np.isnan(computed_d_sigma)

    assert computed_d_sigma.dtype.type is np.float64, """computed partial derivative
    for sigma is not of type np.float64"""

    np.testing.assert_almost_equal(computed_d_sigma, expected_d_sigma)


def test_LINxSE_gp():
    y = [2.0, 3.0, 4.0]
    x = np.matrix([1.3, -2.0, 0.0])

    ripl = init_gp_ripl()

    ripl.assume('make_se', VentureFunctionDiff(covs.makeSquaredExponential,
        [t.NumberType(), t.NumberType()], t.AnyType("VentureFunctionDiff")))

    ripl.assume('make_linear', VentureFunctionDiff(covs.makeLinear,
        [t.NumberType()], t.AnyType("VentureFunctionDiff")))

    print("""CAVEAT - we are setting the randseed with np, not with ripl.seed,
    since the latter does not work at the moment. This could cause problems in
    future versions of Venture""")

    np.random.seed(1)
    ripl.assume("s", "(tag (quote hyper) 0 (uniform_continuous 0 2))")
    ripl.assume("l", "(tag (quote hyper) 1 (uniform_continuous 0 2))")
    ripl.assume("n", "(tag (quote hyper) 2 (uniform_continuous 0 2))")

    ripl.assume("mult_funcs", covs.makeLiftedMult(lambda x1, x2: np.multiply(x1,x2)))

    ripl.assume("cov","""(apply_diff_function mult_funcs
				(apply_diff_function make_linear n)
				(apply_diff_function make_se s l)
			 )""")
    ripl.assume('gp',"(make_gp_part_der zero cov )")
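    # The composite kernel is the pointwise product LIN_n(x, x') * SE_{s,l}(x, x'),
    # since mult_funcs lifts np.multiply (elementwise) over the Gram matrices.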
    ripl.observe("(gp (array 1.3 -2.0 0))",array(y))


    old_state_s = ripl.sample("s")
    old_state_l = ripl.sample("l")
    old_state_n = ripl.sample("n")


    ripl.infer("(grad_ascent (quote hyper) all 1 1 1)")
    
    new_state_s = ripl.sample("s")
    new_state_l = ripl.sample("l")
    new_state_n = ripl.sample("n")

    k_lin = covs.makeLinear(old_state_n)
    k_se = covs.makeSquaredExponential(old_state_s, old_state_l)
    
    dfdn = k_lin.stuff['derivatives'][0]
    dfdsigma = k_se.stuff['derivatives'][0]
    dfdl = k_se.stuff['derivatives'][1]
    sn = 0.01
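    # Observation-noise variance added to the diagonal of K below; assumed to
    # match the noise term used internally by make_gp_part_der.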
    K_lin = np.asmatrix(k_lin(x.T, x.T))
    K_se = np.asmatrix(k_se(x.T, x.T))
    # The product kernel is elementwise (matching makeLiftedMult's np.multiply),
    # not a matrix product.
    K = np.multiply(K_lin, K_se) + sn * np.identity(x.shape[1])
    Kinv = np.linalg.inv(K)
    alpha = Kinv * np.matrix(y).T
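    # Expected steps follow from the gradient of the GP log marginal
    # likelihood (Rasmussen & Williams, GPML eq. 5.9):
    #   d/dtheta log p(y|X) = 0.5 * tr((alpha alpha^T - Kinv) * dK/dtheta)
    # where, for the pointwise product kernel K = K_lin . K_se,
    #   dK/ds = K_lin . dK_se/ds,  dK/dl = K_lin . dK_se/dl,
    #   dK/dn = dK_lin/dn . K_se   (. = elementwise product).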
    

    expected_step_s = 0.5 * np.trace((alpha * alpha.T - Kinv) * np.multiply(K_lin, np.asmatrix(apply_cov(dfdsigma))))
    expected_step_l = 0.5 * np.trace((alpha * alpha.T - Kinv) * np.multiply(K_lin, np.asmatrix(apply_cov(dfdl))))
    expected_step_n = 0.5 * np.trace((alpha * alpha.T - Kinv) * np.multiply(np.asmatrix(apply_cov(dfdn)), K_se))

    np.testing.assert_almost_equal(new_state_s - old_state_s, expected_step_s, decimal=2,
        err_msg="kernel multiplication: gradient with respect to the scale factor in SE is not correct")
    np.testing.assert_almost_equal(new_state_l - old_state_l, expected_step_l,
        err_msg="kernel multiplication: gradient with respect to the length scale in SE is not correct")
    np.testing.assert_almost_equal(new_state_n - old_state_n, expected_step_n, decimal=3,
        err_msg="kernel multiplication: gradient with respect to the scale factor in LIN is not correct")