def test_noise_gradient():
    r'''
    Tests the gradient of the noise covariance
    WN :math:`= \sigma^2 \delta_{x,x^\prime}` with respect to
    :math:`\sigma`, which is :math:`2 \sigma \delta_{x,x^\prime}`.
    '''
    f = covs.makeNoise(2.1)
    f_prime = f.stuff['derivatives'][0]
    actual = apply_cov(f_prime)
    expected = 2 * 2.1 * np.identity(3)
    # Check the computed gradient's dtype (the original asserted on the
    # hand-built expected array, which could never fail for the right reason).
    assert actual.dtype.type is np.float64, "noise gradient is not of type float64"
    np.testing.assert_almost_equal(actual, expected)
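
# A minimal finite-difference cross-check sketch, not part of the original
# suite: the derivative of sigma^2 with respect to sigma is 2*sigma, so a
# central difference of the noise kernel should reproduce the analytic
# gradient tested above. Assumes covs.makeNoise returns a kernel callable
# as k(X, X) (as used in test_SEplusWN_gp below); the helper name is
# hypothetical.
def _fd_noise_gradient_check(sigma=2.1, eps=1e-6):
    X = np.matrix([1.3, -2.0, 0]).T
    fd = (covs.makeNoise(sigma + eps)(X, X)
          - covs.makeNoise(sigma - eps)(X, X)) / (2 * eps)
    np.testing.assert_almost_equal(fd, 2 * sigma * np.identity(3))
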
def test_SEplusWN_gp():
    y = [2.0, 3.0, 4.0]
    x = np.matrix([1.3, -2.0, 0])

    ripl = init_gp_ripl()

    ripl.assume('make_se', VentureFunctionDiff(covs.makeSquaredExponential,
        [t.NumberType(), t.NumberType()], t.AnyType("VentureFunction")))

    ripl.assume('make_noise', VentureFunctionDiff(covs.makeNoise,
        [t.NumberType()], t.AnyType("VentureFunction")))

    print("""CAVEAT - we are setting the randseed with np, not with ripl.seed,
    since the latter does not work at the moment. This could cause problems in
    future versions of Venture""")

    np.random.seed(1)
    ripl.assume("s", "(tag (quote hyper) 0 (uniform_continuous 0 2))")
    ripl.assume("l", "(tag (quote hyper) 1 (uniform_continuous 0 2))")

    ripl.assume("n", "(tag (quote hyper) 2 (uniform_continuous 1 5))")

    # add_funcs lifts ordinary addition to differentiable covariance
    # functions, so cov below is k_se + k_wn with the derivatives of both
    # summands carried along (the expected gradients below rely on this).
    ripl.assume("add_funcs", covs.makeLiftedAdd(lambda x1, x2: x1 + x2))
    ripl.assume("cov", """(apply_diff_function add_funcs
                             (apply_diff_function make_se s l)
                             (apply_diff_function make_noise n))""")
    ripl.assume('gp', "(make_gp_part_der zero cov)")
    ripl.observe("(gp (array 1.3 -2.0 0))", np.array(y))


    old_state_s = ripl.sample("s")
    old_state_l = ripl.sample("l")
    old_state_n = ripl.sample("n")


    # A single gradient-ascent sweep with step size 1, so each hyperparameter
    # should move by exactly its gradient.
    ripl.infer("(grad_ascent (quote hyper) all 1 1 1)")

    new_state_s = ripl.sample("s")
    new_state_l = ripl.sample("l")
    new_state_n = ripl.sample("n")

    k_se = covs.makeSquaredExponential(old_state_s, old_state_l)
    k_wn = covs.makeNoise(old_state_n)
    dfdsigma = k_se.stuff['derivatives'][0]
    dfdl = k_se.stuff['derivatives'][1]
    dfdn = k_wn.stuff['derivatives'][0]
    sn = 0.01  # small diagonal jitter included in the expected kernel matrix
    K = k_se(x.T, x.T) + k_wn(x.T, x.T) + sn * np.identity(x.shape[1])
    Kinv = np.linalg.inv(K)
    alpha = Kinv * np.matrix(y).T
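
    # Expected move per hyperparameter: the gradient of the GP log marginal
    # likelihood,
    #   d/dtheta log p(y | x, theta) = 1/2 tr((alpha alpha^T - K^-1) dK/dtheta)
    # with alpha = K^-1 y (Rasmussen & Williams, GPML, Eq. 5.9).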
    expected_step_s = 0.5 * np.trace((alpha * alpha.T - Kinv) * np.asmatrix(apply_cov(dfdsigma)))
    expected_step_l = 0.5 * np.trace((alpha * alpha.T - Kinv) * np.asmatrix(apply_cov(dfdl)))
    expected_step_n = 0.5 * np.trace((alpha * alpha.T - Kinv) * np.asmatrix(apply_cov(dfdn)))

    np.testing.assert_almost_equal(new_state_s - old_state_s, expected_step_s,
        err_msg="kernel addition: gradient with respect to scale factor in SE is not correct")
    np.testing.assert_almost_equal(new_state_l - old_state_l, expected_step_l,
        err_msg="kernel addition: gradient with respect to length scale in SE is not correct")
    np.testing.assert_almost_equal(new_state_n - old_state_n, expected_step_n,
        err_msg="kernel addition: gradient with respect to noise factor in WN is not correct")
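
# A hedged finite-difference cross-check sketch, not part of the original
# suite: numerically differentiate the GP log marginal likelihood and compare
# it with the trace formula used above. Assumes the kernels returned by
# covs.makeSquaredExponential and covs.makeNoise are callable as k(X, X), and
# that apply_cov evaluates a kernel derivative on the same three inputs
# [1.3, -2.0, 0] used throughout this file; both helper names are
# hypothetical.
def _log_marginal(s, l, n, x, y, sn=0.01):
    K = (covs.makeSquaredExponential(s, l)(x.T, x.T)
         + covs.makeNoise(n)(x.T, x.T)
         + sn * np.identity(x.shape[1]))
    yv = np.matrix(y).T
    return float(-0.5 * yv.T * np.linalg.inv(K) * yv
                 - 0.5 * np.log(np.linalg.det(K))
                 - 0.5 * x.shape[1] * np.log(2 * np.pi))

def _fd_se_plus_wn_gradient_check(s=1.1, l=0.9, n=2.5, eps=1e-5):
    x = np.matrix([1.3, -2.0, 0])
    y = [2.0, 3.0, 4.0]
    k_se = covs.makeSquaredExponential(s, l)
    K = k_se(x.T, x.T) + covs.makeNoise(n)(x.T, x.T) + 0.01 * np.identity(x.shape[1])
    Kinv = np.linalg.inv(K)
    alpha = Kinv * np.matrix(y).T
    # Analytic gradient w.r.t. the SE scale factor, as in the test above.
    analytic = 0.5 * np.trace(
        (alpha * alpha.T - Kinv)
        * np.asmatrix(apply_cov(k_se.stuff['derivatives'][0])))
    # Central difference of the log marginal likelihood in s.
    numeric = (_log_marginal(s + eps, l, n, x, y)
               - _log_marginal(s - eps, l, n, x, y)) / (2 * eps)
    np.testing.assert_almost_equal(analytic, numeric, decimal=4)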