def toy_f(x, var=1E-2):
    return x[0]**2 + var * np.random.randn(1)

init_pt = 5 * np.random.randn(20)
ntrials = 20
maxit = 250

f_avr = np.zeros(maxit + 1)  # set equal to number of iterations + 1

for trial in range(ntrials):
    # sim setup
    test = Stars_sim(toy_f, init_pt, L1=2.0, var=1E-4, verbose=False, maxit=maxit)
    test.STARS_only = True
    test.get_mu_star()
    test.get_h()
    # run STARS steps up to maxit
    while test.iter < test.maxit:
        test.step()
    # update average of f
    f_avr += test.fhist

f2_avr = np.zeros(maxit + 1)

for trial in range(ntrials):
#np.random.seed(9)
init_pt = np.zeros(f.dim)  # prior mean
#init_pt /= np.linalg.norm(init_pt)
ntrials = 3
maxit = 500

f_avr = np.zeros(maxit + 1)
f2_avr = np.zeros(maxit + 1)
trial_final = np.zeros(f.dim)

# STARS no sphere
for trial in range(ntrials):
    # sim setup
    test = Stars_sim(f, init_pt, L1=None, var=None, verbose=False, maxit=maxit)
    test.STARS_only = True
    test.get_mu_star()
    test.get_h()
    test.update_L1 = True

    test2 = Stars_sim(f, init_pt, L1=None, var=None, verbose=True, maxit=maxit)
    test2.update_L1 = True
    #test.STARS_only = True
    test2.get_mu_star()
    test2.get_h()
    test2.train_method = 'GQ'
    test2.adapt = 2 * f.dim
    test2.regul = test2.var
    test2.pad_train = 2.0
    test2.explore_weight = 2.0
    #test2.regul = None
for loop in range(7):
    if loop != 0:
        # append new data
        new_pts = np.random.randn(train_size, dim)
        train_set = np.vstack((train_set, new_pts))
    print('training data size', train_set.shape)
    # train active subspace
    f_data = toy_f(train_set)
    print('data size', f_data.shape)
    # don't normalize
    sub_sp.compute(X=train_set, f=f_data, sstype='QPHD')
    # usual threshold
    adim = find_active(sub_sp.eigenvals, sub_sp.eigenvecs)
    print('Subspace Distance', subspace_dist(true_as, sub_sp.eigenvecs[:, 0:adim]))

test = Stars_sim(toy_f, init_pt, L1=2.0, var=1E-4, verbose=False, maxit=train_size*3)
test.STARS_only = True
test.get_mu_star()
test.get_h()
# run STARS steps up to maxit
while test.iter < test.maxit:
    test.step()
    if test.iter > (dim + 2) * (dim + 1) // 4:
        # compute active subspace from the STARS history so far
        train_x = np.hstack((test.xhist[:, 0:test.iter + 1], test.yhist[:, 0:test.iter]))
        train_f = np.hstack((test.fhist[0:test.iter + 1], test.ghist[0:test.iter]))
        train_x = train_x.T
        sub_sp.compute(X=train_x, f=train_f, sstype='QPHD')
        adim = find_active(sub_sp.eigenvals, sub_sp.eigenvecs)
        print('Subspace Distance', subspace_dist(true_as, sub_sp.eigenvecs[:, 0:adim]))
# maxit random points as training data
train_set = np.random.randn(dim, maxit)
f_data = toy_f(train_set)
print('data size', f_data.shape)
# don't normalize
sub_sp.compute(X=train_set.T, f=f_data, sstype='QPHD')
# usual threshold
adim = find_active(sub_sp.eigenvals, sub_sp.eigenvecs)
print('Subspace Distance', subspace_dist(true_as, sub_sp.eigenvecs[:, 0:adim]))

for trial in range(ntrials):
    # sim setup
    test = Stars_sim(toy_f, init_pt, L1=2.0, var=sigma**2, verbose=False, maxit=maxit)
    test.STARS_only = True
    test.get_mu_star()
    test.get_h()
    # run STARS steps up to maxit
    while test.iter < test.maxit:
        test.step()
    # compute active subspace from the STARS history
    train_x = np.hstack((test.xhist[:, 0:test.iter + 1], test.yhist[:, 0:test.iter]))
    train_f = np.hstack((test.fhist[0:test.iter + 1], test.ghist[0:test.iter]))
    train_x = train_x.T
    sub_sp.compute(X=train_x, f=train_f, sstype='QPHD')
    adim = find_active(sub_sp.eigenvals, sub_sp.eigenvecs)
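    # Hedged addition (not in the original excerpt): report the subspace distance for
    # this trial as well, mirroring the diagnostic printed for the random training set above.
    print('Subspace Distance', subspace_dist(true_as, sub_sp.eigenvecs[:, 0:adim]))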
    return mag * (np.dot(weights, x))**2 + sig * np.random.randn(1)

our_L1 = 2.0 * mag * dim
init_pt = np.random.randn(dim)
ntrials = 500
maxit = 200

f_avr = np.zeros(maxit + 1)  # set equal to number of iterations + 1

for trial in range(ntrials):
    # sim setup
    test = Stars_sim(toy_f, init_pt, L1=our_L1, var=our_var, verbose=False, maxit=maxit)
    test.STARS_only = True
    test.update_L1 = True
    test.get_mu_star()
    test.get_h()
    # run STARS steps up to maxit
    while test.iter < test.maxit:
        test.step()
    # update average of f
    f_avr += test.fhist
    print('STARS trial', trial, ' minval', test.fhist[-1])

f2_avr = np.zeros(maxit + 1)
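# Hedged sketch (not part of the original excerpt): after all trials, the accumulated
# history is typically averaged and inspected on a log scale, mirroring the
# plt.semilogy(fhist) usage elsewhere in these examples. Assumes matplotlib.pyplot has
# been imported as plt; f_mean is a hypothetical name for the averaged history.
f_mean = f_avr / ntrials
plt.semilogy(f_mean)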
init_pt = np.random.randn(dim)
print(nesterov_2_f(init_pt))
ntrials = 10
maxit = 10000

f_avr = np.zeros(maxit + 1)  # set equal to number of iterations + 1

for trial in range(ntrials):
    # sim setup
    test = Stars_sim(nesterov_2_f, init_pt, L1=4, var=1E-12, verbose=False, maxit=maxit)
    test.STARS_only = True
    test.get_mu_star()
    test.get_h()
    # run STARS steps up to maxit
    while test.iter < test.maxit:
        test.step()
        #if test.iter % 100 == 0:
        #    print('iter', test.iter, test.fhist[test.iter])
    # update average of f
    f_avr += test.fhist

f2_avr = np.zeros(maxit + 1)
ntrials = 20
maxit = 600
dim = f.dim
f_avr = np.zeros(maxit + 1)

# Start the clock!
start = timeit.default_timer()

# STARS
for trial in range(ntrials):
    test = Stars_sim(f, this_init_pt, L1=f.L1, var=f.var, verbose=False, maxit=maxit)
    test.STARS_only = True
    test.get_mu_star()
    test.get_h()
    while test.iter < test.maxit:
        test.step()
    # update average of f
    f_avr += test.fhist
    print('STARS trial', trial, ' minval', test.fhist[-1])

a_dims = [2]
n_a_dims = np.size(a_dims)
f2_avr = np.zeros((maxit + 1, n_a_dims))
j = 0
dim = f.dim
#np.random.seed(9)
#init_pt = f.initscl*np.random.randn(dim)
init_pt = np.ones(dim)
ntrials = f.ntrials
maxit = f.maxit

f_avr = np.zeros(maxit + 1)
f2_avr = np.zeros(maxit + 1)
f3_avr = np.zeros(maxit + 1)
f4_avr = np.zeros(maxit + 1)

# STARS
for trial in range(ntrials):
    # sim setup
    test = Stars_sim(f, init_pt, L1=f.L1, var=f.var, verbose=False, maxit=maxit)
    test.STARS_only = True
    test.get_mu_star()
    test.get_h()
    # STARS steps
    while test.iter < test.maxit:
        test.step()
    # update average of f
    f_avr += test.fhist

# FAASTARS (3 scenarios: no extensions, adaptive thresholding, and active subcycling)
for trial in range(ntrials):
    # sim setup
#np.random.seed(9)
init_pt = np.ones(f.dim)
init_pt /= np.linalg.norm(init_pt)
ntrials = 30
maxit = 900

f_avr = np.zeros(maxit + 1)
f_av2 = np.copy(f_avr)

# STARS, no weights
for trial in range(ntrials):
    # sim setup
    test = Stars_sim(f, init_pt, L1=f.L1, var=f.var, verbose=False, maxit=maxit)
    #test.STARS_only = True
    test.get_mu_star()
    test.get_h()
    test.train_method = 'GQ'
    test.threshold = .999
    # run steps up to maxit
    while test.iter < test.maxit:
        test.step()
    # update average of f
    f_avr += test.fhist

# data dump
    #print('initial first component', noisy_pred[:,0])
    #print('current first component', pred[:,0])
    return rms_loss(weights, pred, data)

# stars setup
maxit = 500
init_pt = np.hstack((init_weights, noisy_pred.flatten()))
ntrials = 1

f_avr = np.zeros(maxit + 1)  # set equal to number of iterations + 1

for trial in range(ntrials):
    # sim setup
    test = Stars_sim(stars_wrapper, init_pt, L1=400.0, var=1E-4, verbose=False, maxit=maxit)
    test.STARS_only = True
    test.get_mu_star()
    test.get_h()
    # run STARS steps up to maxit
    while test.iter < test.maxit:
        test.step()
    # update average of f
    f_avr += test.fhist
    print('STARS min', test.x[0:10])

f2_avr = np.zeros(maxit + 1)
f_avr = np.zeros(maxit + 1)

# initialize storage for data dump
STARS_f_sto = np.zeros((maxit + 1, 1))
STARS_x_sto = np.zeros((1, dim))
ASTARS_f_sto = np.zeros((maxit + 1, 1))
ASTARS_x_sto = np.zeros((1, dim))
FAASTARS_f_sto = np.zeros((maxit + 1, 1))
FAASTARS_x_sto = np.zeros((1, dim))

# STARS
for trial in range(ntrials):
    # sim setup
    test = Stars_sim(f, init_pt, L1=f.L1, var=f.var, verbose=False, maxit=maxit)
    test.STARS_only = True
    test.get_mu_star()
    test.get_h()
    # run STARS steps up to maxit
    while test.iter < test.maxit:
        test.step()
    # update average of f
    f_avr += test.fhist
    # data dump
    #STARS_f_sto = np.hstack((STARS_f_sto, np.transpose([test.fhist])))
    #STARS_x_sto = np.vstack((STARS_x_sto, np.transpose(test.xhist)))
# test rbf on random normal samples
lin_samp = np.random.normal(size=(15, 10))
f_data = testfun(lin_samp.T)
ss, lin_surrog = train_rbf(lin_samp, f_data, noise=1E-4)
print('Active subspace from linear rbf', ss.eigenvecs)

quad_samp = np.random.normal(size=(90, 10))
fq_data = testfun(quad_samp.T)
ss2, quad_surrog = train_rbf(quad_samp, fq_data, noise=1E-4)

init_pt = np.random.rand(10)
stars_test = Stars_sim(testfun, init_pt, L1=2.0, var=1E-2, maxit=80)
stars_test.get_mu_star()
stars_test.get_h()
while stars_test.iter < stars_test.maxit:
    stars_test.step()
stars_test.compute_active()
print('Active Variables after STARS run', stars_test.active)
print('Active Weights', stars_test.wts)

plt.semilogy(stars_test.fhist)
plt.figure()
plt.plot(stars_test.xhist[0, :])
plt.plot(stars_test.xhist[-1, :])
""" import numpy as np import astars print(dir(astars)) from astars.stars_sim import Stars_sim def testfun(x): return x[0]**2 + x[-1]**2 + np.random.normal(scale=.01) test = True for trials in range(99): init_pt = np.random.rand(10, 1) stars_test = Stars_sim(testfun, init_pt, L1=2.0, var=1E-2) stars_test.get_mu_star() stars_test.get_h() while stars_test.iter < stars_test.maxit: stars_test.STARS_step() #Error Bound for additive noise error_bound = (4 * stars_test.L1 * (stars_test.dim + 4) / (101) * np.linalg.norm(init_pt)**2 + 3 * np.sqrt(2) / 5 * np.sqrt(stars_test.var) * (stars_test.dim + 4)) err = np.mean(stars_test.fhist) if err > error_bound: print('Unit test failed', err) test = False
#init_pt[6] += .08
#init_pt[7] += 2.5
#init_pt[8] += 1700
#init_pt[9] += .025
print(init_pt)
ntrials = 2
maxit = 200

f_avr = np.zeros(maxit + 1)  # set equal to number of iterations + 1

for trial in range(ntrials):
    # sim setup
    test = Stars_sim(wing_barrier, init_pt, L1=200, var=1E-4, verbose=True, maxit=maxit)
    test.STARS_only = True
    test.debug = True
    test.get_mu_star()
    test.get_h()
    # run STARS steps up to maxit
    while test.iter < test.maxit:
        test.step()
        #if np.isnan(test.x).any():
        #    print(test.xhist[:, 0:test.iter+1], test.yhist[:, 0:test.iter+1],
        #          test.fhist[0:test.iter+1], test.ghist[0:test.iter+1])
        #    print(test.x)
        #    raise SystemExit('nan in current iterate')
    # update average of f
    f_avr += test.fhist
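    # Hedged addition (not in the original excerpt): report where this trial ended,
    # mirroring the 'minval' prints used in the other example scripts.
    print('wing_barrier trial', trial, ' minval', test.fhist[-1])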
init_pt = np.ones(dci.dim)  # prior mean
#init_pt /= np.linalg.norm(init_pt)
ntrials = 1
maxit = 100

f_avr = np.zeros(maxit + 1)
f2_avr = np.zeros(maxit + 1)
f3_avr = np.zeros(maxit + 1)
trial_final = np.zeros(dci.dim)

# STARS no sphere
for trial in range(ntrials):
    # sim setup
    test = Stars_sim(dci2, init_pt, L1=dci2.L1, var=dci2.var, verbose=False, maxit=maxit)
    test.STARS_only = True
    test.get_mu_star()
    test.get_h()

    test2 = Stars_sim(dci, init_pt, f_obj=dci, L1=dci.L1, var=dci.var, verbose=True, maxit=maxit)
    #test.STARS_only = True
    test2.get_mu_star()
    test2.get_h()
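    # Hedged sketch (not in the original excerpt): run both simulations to maxit and
    # accumulate their histories, following the pattern used in the other examples
    # (test is the STARS-only run, test2 the active-subspace run).
    while test.iter < test.maxit:
        test.step()
    while test2.iter < test2.maxit:
        test2.step()
    f_avr += test.fhist
    f2_avr += test2.fhist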
STARS_f_sto = np.zeros((maxit + 1, ntrials))
STARS_x_sto = np.zeros((1, dim))
STARS_L1_sto = np.zeros((maxit + 1, ntrials))
STARS_var_sto = np.zeros(ntrials)

FAASTARS_f_sto = np.zeros((maxit + 1, ntrials))
FAASTARS_x_sto = np.zeros((1, dim))
FAASTARS_L1_sto = np.zeros((maxit + 1, ntrials))
FAASTARS_var_sto = np.zeros(ntrials)

for trial in range(ntrials):
    # sim setup
    test = Stars_sim(f, init_pt, L1=None, var=None, verbose=False, maxit=maxit,
                     true_as=f.active, train_method='GQ')
    test.STARS_only = True
    print('Initial L1', test.L1)
    print('Initial var', test.var)
    test.update_L1 = True
    test.get_mu_star()
    test.get_h()
    # do training steps
    while test.iter < test.tr_stop:
        test.step()
    # update average of f and save for start of astars...?
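    # Hedged sketch (not in the original excerpt): one plausible continuation is to
    # finish the STARS-only run to maxit and record its history in the storage arrays
    # set up above; the FAASTARS arrays would be filled by a corresponding
    # active-subspace run.
    while test.iter < test.maxit:
        test.step()
    STARS_f_sto[:, trial] = test.fhist
    STARS_var_sto[trial] = test.var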