Code example #1
File: sarcos.py Project: davmre/treegp
import os
import numpy as np
# load_sarcos, train_realdata_gp, test_predict, and eval_gp are helpers
# defined elsewhere in the treegp project.

def main():
    sarcos_dir = os.path.join(os.getenv("SIGVISA_HOME"), "papers", "product_tree", "sarcos")

    # Load the SARCOS dataset, then cache the test split and hyperparameters as .npy files.
    sarcos_train_X, sarcos_train_y, sarcos_test_X, sarcos_test_y, hyperparams = load_sarcos(sdir=sarcos_dir)
    np.save(os.path.join(sarcos_dir, "testX.npy"), sarcos_test_X)
    np.save(os.path.join(sarcos_dir, "testy.npy"), sarcos_test_y)
    np.save(os.path.join(sarcos_dir, "hyperparams.npy"), hyperparams)
    print("loaded sarcos data and converted to numpy format")

    # Train a GP on the training split, evaluate its predictions, then run timing benchmarks.
    train_realdata_gp(sarcos_dir, sarcos_train_X, sarcos_train_y, hyperparams)
    print("trained model")
    test_predict(sarcos_dir)
    print("evaluated predictions")

    eval_gp(bdir=sarcos_dir, test_n=100)
    print("timings finished")
Code example #2
File: fitz.py Project: davmre/treegp
import os
import time
import numpy as np
# SparseGP, LinearBasisModel, and eval_gp come from the treegp project.

def cov_timing(cv_dir, lscale, order, n_azi_buckets):
    # Evaluate a trained sparse GP on the held-out fold, writing results and errors to disk.
    sgp = SparseGP(fname=os.path.join(cv_dir, "fold_00.gp%d_%d_%d" % (lscale, order, n_azi_buckets)))
    X = np.loadtxt(os.path.join(cv_dir, "X.txt"))
    test_idx = np.loadtxt(os.path.join(cv_dir, "fold_00_test.txt")).astype(int)
    testX = X[test_idx]
    resultfile = os.path.join(cv_dir, "results_gp%d_%d_%d.txt" % (lscale, order, n_azi_buckets))
    errorfile = os.path.join(cv_dir, "error_gp%d_%d_%d.npz" % (lscale, order, n_azi_buckets))
    eval_gp(gp=sgp, testX=testX, resultfile=resultfile, errorfile=errorfile)

    # Time per-point covariance evaluation for the linear-basis (polynomial) baseline.
    poly = LinearBasisModel(fname=os.path.join(cv_dir, "fold_00.poly_%d_%d" % (order, n_azi_buckets)))
    resultfile_poly = os.path.join(cv_dir, "results_poly_%d_%d.txt" % (order, n_azi_buckets))
    test_n = len(test_idx)
    poly_covars = np.zeros((test_n,))
    t0 = time.time()
    for i in range(test_n):
        poly_covars[i] = poly.covariance(testX[i:i + 1, :])
    t1 = time.time()
    with open(resultfile_poly, 'w') as f:
        f.write("cov time %f\n" % ((t1 - t0) / test_n))
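The timing loop above averages wall-clock time over single-point covariance calls. The same pattern works for any per-example predictor; a self-contained sketch (the fn argument and the lambda below are illustrative stand-ins, not project code):

import time
import numpy as np

def mean_call_time(fn, inputs):
    # Average seconds per call of fn over the rows of a 2-D array,
    # passing one row at a time with its 2-D shape preserved.
    t0 = time.time()
    for i in range(len(inputs)):
        fn(inputs[i:i + 1, :])
    return (time.time() - t0) / len(inputs)

X = np.random.randn(200, 5)
print("mean call time: %g s" % mean_call_time(lambda x: x.sum(), X))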
Code example #3
File: plot_fitz.py Project: davmre/treegp
import os
import numpy as np
# train_realdata_gp, test_predict, eval_gp (and the optional learn_hyperparams)
# are helpers defined elsewhere in the treegp project.

def learn_gp(X, y):
    # Random 20/80 hold-out split via a shared permutation of the indices.
    p = np.random.permutation(len(y))
    train_n = int(len(y) * 0.2)
    trainX = X[p[:train_n], :]
    trainy = y[p[:train_n]]
    testX = X[p[train_n:], :]
    testy = y[p[train_n:]]

    fitz_dir = os.path.join(os.getenv("SIGVISA_HOME"), "papers", "product_tree", "fitz_learned")

    # Hyperparameters can be relearned from the training split:
    # hyperparams = np.array([0.5, 3.0, 50.0, 50.0], dtype=float)
    # hyperparams = learn_hyperparams(fitz_dir, trainX, trainy, dfn_str='lld',
    #                                 hyperparams=hyperparams, sparse_invert=False,
    #                                 basisfns=[lambda x: 1], param_cov=np.array(((10000,),)),
    #                                 param_mean=np.array((0,)), k=1000)
    # print("got hyperparams", hyperparams)

    # Reuse hyperparameters from a previous learning run (otherwise `hyperparams`
    # below would be undefined):
    hyperparams = np.array([1.16700753, 2.53145332, 212.46536884, 157.68719303], dtype=float)

    np.save(os.path.join(fitz_dir, "testX.npy"), testX)
    np.save(os.path.join(fitz_dir, "testy.npy"), testy)
    np.save(os.path.join(fitz_dir, "hyperparams.npy"), hyperparams)

    print "loaded data"

    train_realdata_gp(
        fitz_dir,
        trainX,
        trainy,
        hyperparams=hyperparams,
        sparse_invert=False,
        basisfns=[lambda x: 1],
        param_cov=np.array(((10000,),)),
        param_mean=np.array((0,)),
        dfn_str="lld",
    )
    test_predict(fitz_dir)
    eval_gp(bdir=fitz_dir, test_n=100)
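The shared-permutation split in learn_gp keeps features and targets aligned while drawing a random 20% training subset. A self-contained sketch of the same pattern on synthetic data (all names and numbers here are illustrative):

import numpy as np

X = np.random.randn(100, 3)
y = X.dot(np.array([1.0, -2.0, 0.5])) + 0.1 * np.random.randn(100)

# One permutation indexes both X and y, so rows stay paired after the split.
p = np.random.permutation(len(y))
train_n = int(len(y) * 0.2)
trainX, trainy = X[p[:train_n]], y[p[:train_n]]
testX, testy = X[p[train_n:]], y[p[train_n:]]
print("train %d / test %d" % (len(trainy), len(testy)))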
Code example #4
import os
import sys
import numpy as np
# mkdir_p, SparseGP, SparseGP_CSFIC, load_matlab_semodel, load_matlab_csficmodel,
# test_predict, and eval_gp come from the treegp project.

def main(n_max=18000):
    rundir = sys.argv[1]
    task_name = sys.argv[2]

    basedir = os.path.join(os.getenv('SIGVISA_HOME'), 'papers', 'product_tree', 'run', rundir)
    basename = os.path.join(basedir, task_name)
    X_train = np.loadtxt(basename + '_X_train.txt', delimiter=',')
    y_train = np.loadtxt(basename + '_y_train.txt', delimiter=',')

    # Cap the training set at n_max points.
    n_X = X_train.shape[0]
    if n_X > n_max:
        X_train = np.array(X_train[:n_max, :], copy=True)
        y_train = np.array(y_train[:n_max], copy=True)
        print("using restricted subset of %d training points" % n_max)
    actual_n = min(n_X, n_max)

    X_test = np.loadtxt(basename + '_X_test.txt', delimiter=',')
    y_test = np.loadtxt(basename + '_y_test.txt', delimiter=',')

    """
    #for nu in (90,):
    nu = 90
    csficbase = basename + '_csfic_%d' % nu if nu is not None else basename + '_csfic'
    csficmodel_dir = basename + "_py_csfic_%d" % nu if nu is not None else basename + "_py_csfic"
    print(csficmodel_dir)
    #if not os.path.exists(csficbase):
    #    continue
    csgp = os.path.join(csficmodel_dir, 'trained_%d.gp' % actual_n)

    mkdir_p(csficmodel_dir)
    if os.path.exists(csgp):
        gp_csfic = SparseGP_CSFIC(fname=csgp, build_tree=True, leaf_bin_size=15)
    else:
        gp_csfic = load_matlab_csficmodel(csficbase, X_train, y_train)
        gp_csfic.save_trained_model(csgp)

    #print "testing predictions"
    #test_predict(csficmodel_dir, sgp=gp_csfic, testX=X_test, testy=y_test)

    #print "testing cutoff rule 0"
    #eval_gp(bdir=csficmodel_dir, testX=X_test, test_n=200, gp=gp_csfic, cutoff_rule=0)
    #print "testing cutoff rule 1"
    #eval_gp(bdir=csficmodel_dir, testX=X_test, test_n=200, gp=gp_csfic, cutoff_rule=1)
    #print "testing cutoff rule 2"
    #eval_gp(bdir=csficmodel_dir, testX=X_test, test_n=200, gp=gp_csfic, cutoff_rule=2)

    print "testing leaf bins 15"
    gp_csfic10 = SparseGP_CSFIC(fname=csgp, build_tree=True, leaf_bin_size=10)
    eval_gp(bdir=csficmodel_dir, testX=X_test, test_n=200, gp=gp_csfic10, cutoff_rule=2, flag="_bin15")

    #print "testing leaf bins 100"
    #gp_csfic50 = SparseGP_CSFIC(fname=csgp, build_tree=True, leaf_bin_size=50)
    #eval_gp(bdir=csficmodel_dir, testX=X_test, test_n=200, gp=gp_csfic50, cutoff_rule=2, flag="_bin50")
    """
    # Squared-exponential GP: load a cached trained model if one exists,
    # otherwise convert the MATLAB-trained model and cache it.
    semodel_dir = basename + "_py_se"
    segp = os.path.join(semodel_dir, 'trained.gp')
    mkdir_p(semodel_dir)
    if os.path.exists(segp):
        gp_se = SparseGP(fname=segp, build_tree=True, leaf_bin_size=0, sparse_invert=False)
    else:
        gp_se = load_matlab_semodel(basename, X_train, y_train)
        gp_se.save_trained_model(segp)

    test_predict(semodel_dir, sgp=gp_se, testX=X_test, testy=y_test)
    eval_gp(bdir=semodel_dir, testX=X_test, test_n=200, gp=gp_se)
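Both branches above follow a load-or-train caching pattern: reuse a serialized model if its file exists, otherwise build it and save it for next time. A generic sketch with pickle (the train callable and the path are hypothetical stand-ins for load_matlab_semodel and save_trained_model):

import os
import pickle

def load_or_train(path, train):
    # Reuse the cached model if present; otherwise train and cache it.
    if os.path.exists(path):
        with open(path, 'rb') as f:
            return pickle.load(f)
    model = train()
    with open(path, 'wb') as f:
        pickle.dump(model, f)
    return model

# Usage with a trivial stand-in "model":
model = load_or_train('/tmp/model.pkl', lambda: {"weights": [1, 2, 3]})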