    def make_gp(self, ripl):
        ripl.assume('make_lin',VentureFunction(makeLinear,[t.NumberType()], t.AnyType("VentureFunction")))
        ripl.assume('make_per',VentureFunction(makePeriodic,[t.NumberType(), t.NumberType(), t.NumberType()], t.AnyType("VentureFunction")))
        ripl.assume('a',"(tag (quote parameter) 0 (log  (uniform_continuous 0 8 ) ))")
        ripl.assume('sf',"(tag (quote parameter) 1 (log (uniform_continuous 0 8 ) ))")
        ripl.assume('p',"(tag (quote parameter) 2 (log (uniform_continuous 0.1 8) ))")
        ripl.assume('l',"(tag (quote parameter) 3 (log (uniform_continuous 0 8) ))")

        ripl.assume("func_times", makeLiftedMult(lambda x1, x2: np.multiply(x1,x2)))

        ripl.assume('gp',"""(tag (quote model) 0
                            (make_gp_part_der zero
                                (apply_function func_times
                                (apply_function make_per l p sf  )
                                (apply_function make_lin a ))))""")
Example #2
    def make_gp(self, ripl):
        ripl.assume('make_linear', VentureFunction(makeLinear, [t.NumberType(),t.IntegerType()], t.AnyType("VentureFunction")))
        ripl.assume('make_periodic', VentureFunction(makePeriodic, [t.NumberType(), t.NumberType(), t.NumberType(),t.IntegerType()], t.AnyType("VentureFunction")))
        ripl.assume('make_se',VentureFunction(makeSquaredExponential,[t.NumberType(), t.NumberType(),t.IntegerType()], t.AnyType("VentureFunction")))
        ripl.assume('make_rq', VentureFunction(makeRQ, [t.NumberType(), t.NumberType(), t.NumberType(),t.IntegerType()], t.AnyType("VentureFunction")))
 
     
        ripl.assume('a',' (tag (quote hyper ) 0 (uniform_continuous  0 8))')

        ripl.assume('l',' (tag (quote hyper) 1 (uniform_continuous  0 8))')
        ripl.assume('q',' (tag (quote  hyper) 2 (uniform_continuous  0.01 8))')
        ripl.assume('sf1','(tag (quote  hyper) 3 (uniform_continuous  0 8))')


        ripl.assume('theta_se_1',' (tag (quote hyper) 4 (uniform_continuous  0 8))')
        ripl.assume('theta_se_2',' (tag (quote hyper) 5 (uniform_continuous  0 8))')

        ripl.assume('theta_rq_1','(tag (quote hyper) 6 (uniform_continuous  0 8))')
        ripl.assume('theta_rq_2','(tag (quote hyper) 7 (uniform_continuous  0 8))')
        ripl.assume('theta_rq_3','(tag (quote hyper) 8 (uniform_continuous  0 8))')


        ripl.assume('lin', "(apply_function make_linear a  0 )")
        ripl.assume('per', "(apply_function make_periodic l q sf1 1 ) ")
        ripl.assume('se1', "(apply_function make_se theta_se_1 theta_se_2 2 )")
        ripl.assume('rq', "(apply_function make_rq theta_rq_1 theta_rq_2 theta_rq_3  7 )")
    
         #### GP Structure Prior

        ###### For simplicity, I start with the maximum number of kernels per type.

        ripl.assume("func_times", makeLiftedMult(lambda x1, x2: np.multiply(x1,x2)))
        ripl.assume("func_plus", makeLiftedAdd(lambda x1, x2: x1 + x2))


        ripl.assume('cov_list','(list lin per se1 rq )')
        ripl.bind_foreign_sp("subset",typed_nr(Subset(), [t.ListType(),t.SimplexType()], t.ListType()))

        number = 4

        # perms[i] = (i+1)!, the number of orderings of a subset of size i+1;
        # the normalized factorials form the simplex argument passed to subset below.
        total_perms = 0
        perms = []
        for i in range(number):
            perms.append(len(list(itertools.permutations(range(i + 1)))))
            total_perms += perms[i]

        simplex = "( simplex  "
        for i in range(number):
            simplex += str(float(perms[i]) / total_perms) + " "

        simplex += " )"
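        # With number = 4 the factorials are 1, 2, 6, 24 (total 33), so the simplex
        # weights are 1/33, 2/33, 6/33, 24/33 (about 0.03, 0.06, 0.18, 0.73).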
        ripl.assume('s','(tag (quote grammar) 1 (subset cov_list '+simplex + ' ))')
        ripl.assume('cov_compo',"""
         (tag (quote grammar) 0
             (lambda (l )
                (if (lte ( size l) 1)
                     (first l)
                         (if (flip)
                             (apply_function func_plus (first l) (cov_compo (rest l)))
                             (apply_function func_times (first l) (cov_compo (rest l)))
                    )
        )))
        """)


        ripl.assume('cov_structure','(cov_compo s)')
        ripl.assume('gp','(tag (quote model) 0 (make_gp_part_der zero cov_structure))')

        ripl.bind_foreign_sp("covariance_string",
                  deterministic_typed(lambda x:VentureSymbol(x.stuff['name']), [t.AnyType()], t.AnyType(),
                                      descr="returns the covariance type"))

        ripl.bind_foreign_sp("covariance_label",
                  deterministic_typed(lambda x:x.stuff['label_list'], [t.AnyType()], t.ArrayType(),
                                      descr="returns the covariance label"))
Example #3
def __venture_start__(ripl, *_args):
    np.random.seed(3)
    random.seed(3)
    ripl.assume('make_linear',
                VentureFunctionDiff(makeLinear,
                                    [t.NumberType(),t.IntegerType()],
                                    t.AnyType("VentureFunction")))
    ripl.assume('make_periodic',
                VentureFunction(makePeriodic,
                                [t.NumberType(), t.NumberType(), t.NumberType(),t.IntegerType()],
                                t.AnyType("VentureFunction")))
    ripl.assume('make_squaredexp',
                VentureFunction(makeSquaredExponential,
                                [t.NumberType(), t.NumberType(),t.IntegerType()],
                                t.AnyType("VentureFunction")))
    ripl.assume('make_noise',
                VentureFunction(makeNoise,
                                [t.NumberType(),t.IntegerType()],
                                t.AnyType("VentureFunction")))
    ripl.assume('make_rq',
                VentureFunction(makeRQ,
                                [t.NumberType(), t.NumberType(), t.NumberType(),t.IntegerType()],
                                t.AnyType("VentureFunction")))

    ripl.assume('make_const_func', VentureFunction(makeConstFunc, [t.NumberType()], constantType))



    ripl.assume("mult_funcs", makeLiftedMult(lambda x1, x2: np.multiply(x1,x2)))
    ripl.assume("add_funcs", makeLiftedAdd(lambda x1, x2: x1 + x2))

    ripl.bind_foreign_sp('allocate_gpmem', gpmem.allocateGPmemSP)

    ## pseudo-uniform structure prior
    def uniform_structure_prior(number):
        # perms[i] = (i+1)!; normalizing the factorials yields the simplex weights.
        total_perms = 0
        perms = []
        for i in range(number):
            perms.append(len(list(itertools.permutations(range(i + 1)))))
            total_perms += perms[i]
        return [float(perms[i]) / total_perms for i in range(number)]
    uniform_structure = deterministic_typed(uniform_structure_prior, [t.IntegerType()], t.SimplexType())
    ripl.bind_foreign_sp('uniform_structure', uniform_structure)
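    # For example, uniform_structure_prior(4) returns [1/33, 2/33, 6/33, 24/33],
    # i.e. the factorials 1! through 4! normalized to sum to one.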

    ripl.bind_foreign_sp("subset",typed_nr(Subset(), [t.ListType(),t.SimplexType()], t.ListType()))

    if ripl.evaluate("data")=="synthetic":
       from get_synthetic_data import make_data_function,data_xs
    elif ripl.evaluate("data")=="airline":
       from get_airline_data import make_data_function,data_xs
    elif ripl.evaluate("data")=="co2":
       from get_co2_data import make_data_function,data_xs
    else:
       raise ValueError('Unknown data set; please specify synthetic, airline, or co2')

    ripl.bind_foreign_sp('make_data_function', deterministic_typed(
        make_data_function, [t.StringType()], sp.SPType([t.NumberType()], t.NumberType())))
    # helper SP to get the input data
    get_data_xs_SP = deterministic_typed(
        lambda: data_xs, [], t.HomogeneousArrayType(t.NumberType()))
    ripl.bind_foreign_sp('get_data_xs', get_data_xs_SP)

    # SPs to interpret covariance structure
    ripl.bind_foreign_sp("covariance_string",
        deterministic_typed(lambda x:VentureSymbol(x.stuff['name']), [t.AnyType()], t.AnyType(),
                            descr="returns the covariance type"))

    # SP to output covariance label so that we only infer over the
    # hyper-parameters of base kernels that are actually in use
    ripl.bind_foreign_sp("covariance_label",
        deterministic_typed(lambda x:x.stuff['label_list'], [t.AnyType()], t.ArrayType(),
                            descr="returns the covariance label"))
Example #4
def test_LINxSE_gp():
    y = [2.0, 3.0, 4.0 ]
    x = np.matrix([1.3, -2.0,0])

    ripl = init_gp_ripl()

    ripl.assume('make_se', VentureFunctionDiff(covs.makeSquaredExponential, [t.NumberType(), t.NumberType()],
                                               t.AnyType("VentureFunctionDiff")))

    ripl.assume('make_linear', VentureFunctionDiff(covs.makeLinear, [t.NumberType()],
                                                   t.AnyType("VentureFunctionDiff")))

    print("""CAVEAT - we are setting the randseed with np, not with ripl.seed,
    since the latter does not work at the moment. This could cause problems in
    future versions of Venture""")

    np.random.seed(1) 
    ripl.assume("s","(tag (quote hyper) 0 (uniform_continuous 0 2 ))")
    ripl.assume("l","(tag (quote hyper) 1 (uniform_continuous 0 2 ))")

    ripl.assume("n","(tag (quote hyper) 2 (uniform_continuous 0 2 ))")

    ripl.assume("mult_funcs", covs.makeLiftedMult(lambda x1, x2: np.multiply(x1,x2)))

    ripl.assume("cov","""(apply_diff_function mult_funcs
				(apply_diff_function make_linear n)
				(apply_diff_function make_se s l)
			 )""")
    ripl.assume('gp',"(make_gp_part_der zero cov )")
    ripl.observe("(gp (array 1.3 -2.0 0))",array(y))


    old_state_s=ripl.sample("s")
    old_state_l=ripl.sample("l")
    old_state_n=ripl.sample("n")


    ripl.infer("(grad_ascent (quote hyper) all 1 1 1)")
    
    new_state_s=ripl.sample("s")
    new_state_l=ripl.sample("l")
    new_state_n=ripl.sample("n")

    k_lin  = covs.makeLinear(old_state_n)
    k_se  = covs.makeSquaredExponential(old_state_s,old_state_l)
    
    dfdn=k_lin.stuff['derivatives'][0]
    dfdsigma=k_se.stuff['derivatives'][0]
    dfdl=k_se.stuff['derivatives'][1]
    sn = 0.01
    K_lin = np.asmatrix(k_lin(x.T,x.T))
    K_se = np.asmatrix(k_se(x.T,x.T))
    K = K_lin * K_se + (sn*np.identity(x.shape[1]))
    Kinv = np.linalg.inv(K)
    alpha = Kinv * np.matrix(y).T
    
    #import pdb;pdb.set_trace()
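    # The expected steps below instantiate the standard gradient of the GP log
    # marginal likelihood, 0.5 * tr((alpha alpha^T - K^{-1}) dK/dtheta), with
    # dK/dtheta obtained via the product rule for the composite LIN x SE kernel.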

    expected_step_s = 0.5 * np.trace((alpha*alpha.T - Kinv) * K_lin * np.asmatrix(apply_cov(dfdsigma)))
    expected_step_l = 0.5 * np.trace((alpha*alpha.T - Kinv) * K_lin * np.asmatrix(apply_cov(dfdl)))
    expected_step_n = 0.5 * np.trace((alpha*alpha.T - Kinv) * np.asmatrix(apply_cov(dfdn)) * K_se)

    np.testing.assert_almost_equal((new_state_s-old_state_s),expected_step_s,decimal=2,
    	err_msg=" kernel multiplication: gradient with respect to scale factor in SE is not correct")
    np.testing.assert_almost_equal((new_state_l-old_state_l),expected_step_l,
    	err_msg=" kernel multiplication: gradient with respect to length scale in SE is not correct")
    np.testing.assert_almost_equal((new_state_n-old_state_n),expected_step_n,decimal=3,
    	err_msg=" kernel multiplication: gradient with respect to scale factor in LIN  is not correct")