import os
import sys

import numpy as np

# base1, mkdir_p, get_nstd, and plot_gp are assumed to be defined/imported elsewhere in this module.


def main():
    # order = 0
    # lscale = 10
    # example: python fitx.py LPAZ amp lscale order n_azi_buckets
    sta = sys.argv[1]
    target = sys.argv[2]
    lscale = int(sys.argv[3])
    order = int(sys.argv[4])
    n_azi_buckets = int(sys.argv[5])
    param_var = 10

    # load the training inputs/targets for this target quantity and station
    X = np.loadtxt(os.path.join(base1, "X_%s_%s.txt" % (target, sta)))
    y = np.loadtxt(os.path.join(base1, "y_%s_%s.txt" % (target, sta)))

    # keep only points whose third feature (column 2) is below 15
    good_idx = X[:, 2] < 15
    X = X[good_idx, :]
    y = y[good_idx]

    cv_dir = os.path.join(base1, "cv_%s_%s" % (target, sta))
    mkdir_p(cv_dir)

    nstd = get_nstd(X, y, order, n_azi_buckets, param_var)
    plot_gp(X, y, nstd, lscale, order, n_azi_buckets, param_var, cv_dir)
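
# A minimal sketch of how this script might be invoked; the entry-point guard and the
# concrete argument values (lscale=10 and order=0 from the commented-out defaults above,
# n_azi_buckets=12 as a hypothetical choice) are assumptions, and fitx.py is the script
# name taken from the usage comment.
#
#   python fitx.py LPAZ amp 10 0 12
#
if __name__ == "__main__":
    main()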
import os
import sys

import numpy as np

# mkdir_p, SparseGP, SparseGP_CSFIC, load_matlab_csficmodel, load_matlab_semodel,
# test_predict, and eval_gp are assumed to be defined/imported elsewhere in this module.


def main(n_max=18000):
    rundir = sys.argv[1]
    task_name = sys.argv[2]
    basedir = os.path.join(os.getenv('SIGVISA_HOME'), 'papers', 'product_tree', 'run', rundir)
    basename = os.path.join(basedir, task_name)

    # load training data, optionally restricting to the first n_max points
    X_train = np.loadtxt(basename + '_X_train.txt', delimiter=',')
    y_train = np.loadtxt(basename + '_y_train.txt', delimiter=',')
    n_X = X_train.shape[0]
    if n_X > n_max:
        X_train = np.array(X_train[:n_max, :], copy=True)
        y_train = np.array(y_train[:n_max], copy=True)
        print "using restricted subset of %d training points" % (n_max)
    actual_n = min(n_X, n_max)

    X_test = np.loadtxt(basename + '_X_test.txt', delimiter=',')
    y_test = np.loadtxt(basename + '_y_test.txt', delimiter=',')

    # CSFIC experiments, currently disabled
    """
    #for nu in (90,):
    nu = 90
    csficbase = basename + '_csfic_%d' % nu if nu is not None else basename + '_csfic'
    csficmodel_dir = basename + "_py_csfic_%d" % nu if nu is not None else basename + "_py_csfic"
    print csficmodel_dir
    #if not os.path.exists(csficbase):
    #    continue

    csgp = os.path.join(csficmodel_dir, 'trained_%d.gp' % actual_n)
    mkdir_p(csficmodel_dir)
    if os.path.exists(csgp):
        gp_csfic = SparseGP_CSFIC(fname=csgp, build_tree=True, leaf_bin_size=15)
    else:
        gp_csfic = load_matlab_csficmodel(csficbase, X_train, y_train)
        gp_csfic.save_trained_model(csgp)

    #print "testing predictions"
    #test_predict(csficmodel_dir, sgp=gp_csfic, testX=X_test, testy=y_test)
    #print "testing cutoff rule 0"
    #eval_gp(bdir=csficmodel_dir, testX=X_test, test_n=200, gp=gp_csfic, cutoff_rule=0)
    #print "testing cutoff rule 1"
    #eval_gp(bdir=csficmodel_dir, testX=X_test, test_n=200, gp=gp_csfic, cutoff_rule=1)
    #print "testing cutoff rule 2"
    #eval_gp(bdir=csficmodel_dir, testX=X_test, test_n=200, gp=gp_csfic, cutoff_rule=2)

    print "testing leaf bins 15"
    gp_csfic10 = SparseGP_CSFIC(fname=csgp, build_tree=True, leaf_bin_size=10)
    eval_gp(bdir=csficmodel_dir, testX=X_test, test_n=200, gp=gp_csfic10, cutoff_rule=2, flag="_bin15")

    #print "testing leaf bins 100"
    #gp_csfic50 = SparseGP_CSFIC(fname=csgp, build_tree=True, leaf_bin_size=50)
    #eval_gp(bdir=csficmodel_dir, testX=X_test, test_n=200, gp=gp_csfic50, cutoff_rule=2, flag="_bin50")
    """

    # SE model: reuse the cached trained model if present, otherwise convert the
    # MATLAB-trained model and cache it for next time
    semodel_dir = basename + "_py_se"
    segp = os.path.join(semodel_dir, 'trained.gp')
    mkdir_p(semodel_dir)
    if os.path.exists(segp):
        gp_se = SparseGP(fname=segp, build_tree=True, leaf_bin_size=0, sparse_invert=False)
    else:
        gp_se = load_matlab_semodel(basename, X_train, y_train)
        gp_se.save_trained_model(segp)

    test_predict(semodel_dir, sgp=gp_se, testX=X_test, testy=y_test)
    eval_gp(bdir=semodel_dir, testX=X_test, test_n=200, gp=gp_se)
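
# A minimal sketch of an invocation, assuming this script is run directly; the entry-point
# guard is an assumption, and <this_script>, <rundir>, and <task_name> are hypothetical
# placeholders for the script name, a run directory, and a task name under
# $SIGVISA_HOME/papers/product_tree/run/.
#
#   python <this_script>.py <rundir> <task_name>
#
if __name__ == "__main__":
    main()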